/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/iopoll.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS             10
#define CORE_POWER                      0x0
#define CORE_SW_RST                     (1 << 7)

#define SDHCI_VER_100                   0x2B

#define CORE_VERSION_STEP_MASK          0x0000FFFF
#define CORE_VERSION_MINOR_MASK         0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT        16
#define CORE_VERSION_MAJOR_MASK         0xF0000000
#define CORE_VERSION_MAJOR_SHIFT        28
#define CORE_VERSION_TARGET_MASK        0x000000FF
#define SDHCI_MSM_VER_420               0x49

#define SWITCHABLE_SIGNALLING_VOL       (1 << 29)

#define CORE_VERSION_MAJOR_MASK         0xF0000000
#define CORE_VERSION_MAJOR_SHIFT        28

#define CORE_HC_MODE                    0x78
#define HC_MODE_EN                      0x1
#define FF_CLK_SW_RST_DIS               (1 << 13)

#define CORE_PWRCTL_BUS_OFF             0x01
#define CORE_PWRCTL_BUS_ON              (1 << 1)
#define CORE_PWRCTL_IO_LOW              (1 << 2)
#define CORE_PWRCTL_IO_HIGH             (1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS         0x01
#define CORE_PWRCTL_BUS_FAIL            (1 << 1)
#define CORE_PWRCTL_IO_SUCCESS          (1 << 2)
#define CORE_PWRCTL_IO_FAIL             (1 << 3)

#define INT_MASK                        0xF
#define MAX_PHASES                      16

#define CORE_CMD_DAT_TRACK_SEL          (1 << 0)
#define CORE_DLL_EN                     (1 << 16)
#define CORE_CDR_EN                     (1 << 17)
#define CORE_CK_OUT_EN                  (1 << 18)
#define CORE_CDR_EXT_EN                 (1 << 19)
#define CORE_DLL_PDN                    (1 << 29)
#define CORE_DLL_RST                    (1 << 30)

#define CORE_DLL_LOCK                   (1 << 7)
#define CORE_DDR_DLL_LOCK               (1 << 11)

#define CORE_CLK_PWRSAVE                (1 << 1)
#define CORE_HC_MCLK_SEL_DFLT           (2 << 8)
#define CORE_HC_MCLK_SEL_HS400          (3 << 8)
#define CORE_HC_MCLK_SEL_MASK           (3 << 8)
#define CORE_HC_AUTO_CMD21_EN           (1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN       (1 << 15)
#define CORE_IO_PAD_PWR_SWITCH          (1 << 16)
#define CORE_HC_SELECT_IN_EN            (1 << 18)
#define CORE_HC_SELECT_IN_HS400         (6 << 19)
#define CORE_HC_SELECT_IN_MASK          (7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL        0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS         (1 << 20)
#define HC_SW_RST_REQ                   (1 << 21)
#define CORE_ONE_MID_EN                 (1 << 25)

#define CORE_8_BIT_SUPPORT              (1 << 18)
#define CORE_3_3V_SUPPORT               (1 << 24)
#define CORE_3_0V_SUPPORT               (1 << 25)
#define CORE_1_8V_SUPPORT               (1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT     BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0          0x130
#define CORE_SW_TRIG_FULL_CALIB         (1 << 16)
#define CORE_HW_AUTOCAL_ENA             (1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1          0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0     0x138
#define CORE_TIMER_ENA                  (1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1     0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG       0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG     0x144
#define CORE_CDC_OFFSET_CFG             0x14C
#define CORE_CSR_CDC_DELAY_CFG          0x150
#define CORE_CDC_SLAVE_DDA_CFG          0x160
#define CORE_CSR_CDC_STATUS0            0x164
#define CORE_CALIBRATION_DONE           (1 << 0)

#define CORE_CDC_ERROR_CODE_MASK        0x7000000

#define CQ_CMD_DBG_RAM                  0x110
#define CQ_CMD_DBG_RAM_WA               0x150
#define CQ_CMD_DBG_RAM_OL               0x154

#define CORE_CSR_CDC_GEN_CFG            0x178
#define CORE_CDC_SWITCH_BYPASS_OFF      (1 << 0)
#define CORE_CDC_SWITCH_RC_EN           (1 << 1)

#define CORE_CDC_T4_DLY_SEL             (1 << 0)
#define CORE_CMDIN_RCLK_EN              (1 << 1)
#define CORE_START_CDC_TRAFFIC          (1 << 6)

#define CORE_PWRSAVE_DLL                (1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)

#define CORE_DDR_CAL_EN                 (1 << 0)
#define CORE_FLL_CYCLE_CNT              (1 << 18)
#define CORE_DLL_CLOCK_DISABLE          (1 << 21)

#define DDR_CONFIG_POR_VAL              0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK    0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY         115
#define DDR_CONFIG_2_POR_VAL            0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS          (1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY    200 /* msecs */

#define CORE_FREQ_100MHZ                (100 * 1000 * 1000)
#define TCXO_FREQ                       19200000

#define INVALID_TUNING_PHASE            -1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES               16
#define MAX_DRV_TYPES_SUPPORTED_HS200   4
#define MSM_AUTOSUSPEND_DELAY_MS        100

struct sdhci_msm_offset {
        u32 CORE_MCI_DATA_CNT;
        u32 CORE_MCI_STATUS;
        u32 CORE_MCI_FIFO_CNT;
        u32 CORE_MCI_VERSION;
        u32 CORE_GENERICS;
        u32 CORE_TESTBUS_CONFIG;
        u32 CORE_TESTBUS_SEL2_BIT;
        u32 CORE_TESTBUS_ENA;
        u32 CORE_TESTBUS_SEL2;
        u32 CORE_PWRCTL_STATUS;
        u32 CORE_PWRCTL_MASK;
        u32 CORE_PWRCTL_CLEAR;
        u32 CORE_PWRCTL_CTL;
        u32 CORE_SDCC_DEBUG_REG;
        u32 CORE_DLL_CONFIG;
        u32 CORE_DLL_STATUS;
        u32 CORE_VENDOR_SPEC;
        u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
        u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
        u32 CORE_VENDOR_SPEC_FUNC2;
        u32 CORE_VENDOR_SPEC_CAPABILITIES0;
        u32 CORE_DDR_200_CFG;
        u32 CORE_VENDOR_SPEC3;
        u32 CORE_DLL_CONFIG_2;
        u32 CORE_DDR_CONFIG;
        u32 CORE_DDR_CONFIG_2;
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
        .CORE_MCI_DATA_CNT = 0x35C,
        .CORE_MCI_STATUS = 0x324,
        .CORE_MCI_FIFO_CNT = 0x308,
        .CORE_MCI_VERSION = 0x318,
        .CORE_GENERICS = 0x320,
        .CORE_TESTBUS_CONFIG = 0x32C,
        .CORE_TESTBUS_SEL2_BIT = 3,
        .CORE_TESTBUS_ENA = (1 << 31),
        .CORE_TESTBUS_SEL2 = (1 << 3),
        .CORE_PWRCTL_STATUS = 0x240,
        .CORE_PWRCTL_MASK = 0x244,
        .CORE_PWRCTL_CLEAR = 0x248,
        .CORE_PWRCTL_CTL = 0x24C,
        .CORE_SDCC_DEBUG_REG = 0x358,
        .CORE_DLL_CONFIG = 0x200,
        .CORE_DLL_STATUS = 0x208,
        .CORE_VENDOR_SPEC = 0x20C,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
        .CORE_VENDOR_SPEC_FUNC2 = 0x210,
        .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
        .CORE_DDR_200_CFG = 0x224,
        .CORE_VENDOR_SPEC3 = 0x250,
        .CORE_DLL_CONFIG_2 = 0x254,
        .CORE_DDR_CONFIG = 0x258,
        .CORE_DDR_CONFIG_2 = 0x25C,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
        .CORE_MCI_DATA_CNT = 0x30,
        .CORE_MCI_STATUS = 0x34,
        .CORE_MCI_FIFO_CNT = 0x44,
        .CORE_MCI_VERSION = 0x050,
        .CORE_GENERICS = 0x70,
        .CORE_TESTBUS_CONFIG = 0x0CC,
        .CORE_TESTBUS_SEL2_BIT = 4,
        .CORE_TESTBUS_ENA = (1 << 3),
        .CORE_TESTBUS_SEL2 = (1 << 4),
        .CORE_PWRCTL_STATUS = 0xDC,
        .CORE_PWRCTL_MASK = 0xE0,
        .CORE_PWRCTL_CLEAR = 0xE4,
        .CORE_PWRCTL_CTL = 0xE8,
        .CORE_SDCC_DEBUG_REG = 0x124,
        .CORE_DLL_CONFIG = 0x100,
        .CORE_DLL_STATUS = 0x108,
        .CORE_VENDOR_SPEC = 0x10C,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
        .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
        .CORE_VENDOR_SPEC_FUNC2 = 0x110,
        .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
        .CORE_DDR_200_CFG = 0x184,
        .CORE_VENDOR_SPEC3 = 0x1B0,
        .CORE_DLL_CONFIG_2 = 0x1B4,
        .CORE_DDR_CONFIG = 0x1B8,
        .CORE_DDR_CONFIG_2 = 0x1BC,
};

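/*
 * Register access helpers: controllers with the legacy MCI block removed
 * expose these vendor registers in the main HC register space
 * (host->ioaddr), while older controllers keep them in the separate
 * core_mem region. The helpers below pick the base from
 * msm_host->mci_removed.
 */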
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        void __iomem *base_addr;

        if (msm_host->mci_removed)
                base_addr = host->ioaddr;
        else
                base_addr = msm_host->core_mem;

        writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

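/*
 * Tuning block patterns used by sdhci_msm_execute_tuning() to verify the
 * data sampled at each DLL phase; the 128-byte pattern is selected for
 * HS200 on an 8-bit bus, the 64-byte pattern otherwise.
 */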
static const u32 tuning_block_64[] = {
        0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
        0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
        0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
        0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
        0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
        0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
        0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
        0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
        0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
        0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
        0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
        0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
        /* set vdd_io_data->low_vol_level */
        VDD_IO_LOW,
        /* set vdd_io_data->high_vol_level */
        VDD_IO_HIGH,
        /*
         * set to whatever is passed in voltage_level (the third argument)
         * of sdhci_msm_set_vdd_io_vol().
         */
        VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
                                                u8 poll)
{
        int rc = 0;
        u32 wait_cnt = 50;
        u8 ck_out_en = 0;
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;

        /* poll for CK_OUT_EN bit. max. poll time = 50us */
        ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

        while (ck_out_en != poll) {
                if (--wait_cnt == 0) {
                        pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
                                mmc_hostname(mmc), __func__, poll);
                        rc = -ETIMEDOUT;
                        goto out;
                }
                udelay(1);

                ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
        }
out:
        return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
        int rc = 0;
        u32 config;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;

        config = readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);
        config |= CORE_CDR_EN;
        config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
        writel_relaxed(config, host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);

        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err;

        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err;
        goto out;
err:
        pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
        return rc;
}

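/*
 * sysfs store/show handlers for the en_auto_cmd21 flag, which controls
 * whether CORE_HC_AUTO_CMD21_EN is set during HS200 tuning (see
 * sdhci_msm_config_auto_tuning_cmd() below).
 */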
static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
                                *attr, const char *buf, size_t count)
{
        struct sdhci_host *host = dev_get_drvdata(dev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        u32 tmp;
        unsigned long flags;

        if (!kstrtou32(buf, 0, &tmp)) {
                spin_lock_irqsave(&host->lock, flags);
                msm_host->en_auto_cmd21 = !!tmp;
                spin_unlock_irqrestore(&host->lock, flags);
        }
        return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct sdhci_host *host = dev_get_drvdata(dev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;

        return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
                                            bool enable,
                                            u32 type)
{
        int rc = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;
        u32 val = 0;

        if (!msm_host->en_auto_cmd21)
                return 0;

        if (type == MMC_SEND_TUNING_BLOCK_HS200)
                val = CORE_HC_AUTO_CMD21_EN;
        else
                return 0;

        if (enable) {
                rc = msm_enable_cdr_cm_sdc4_dll(host);
                writel_relaxed(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC) | val,
                        host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
        } else {
                writel_relaxed(readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC) & ~val,
                        host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
        }
        return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
        int rc = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;
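        /*
         * Map the linear phase index (0..15) to the Gray-coded value that is
         * programmed into the CDR_SELEXT field (bits 23:20) of DLL_CONFIG.
         */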
        u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
                                        0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
                                        0x8};
        unsigned long flags;
        u32 config;
        struct mmc_host *mmc = host->mmc;

        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
        spin_lock_irqsave(&host->lock, flags);

        config = readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);
        config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
        config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
        writel_relaxed(config, host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err_out;

        /*
         * Write the selected DLL clock output phase (0 ... 15)
         * to CDR_SELEXT bit field of DLL_CONFIG register.
         */
        writel_relaxed(((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG)
                        & ~(0xF << 20))
                        | (grey_coded_phase_table[phase] << 20)),
                        host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
        writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
                        host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err_out;

        config = readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);
        config |= CORE_CDR_EN;
        config &= ~CORE_CDR_EXT_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);
        goto out;

err_out:
        pr_err("%s: %s: Failed to set DLL phase: %d\n",
                mmc_hostname(mmc), __func__, phase);
out:
        spin_unlock_irqrestore(&host->lock, flags);
        pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
        return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
                                u8 *phase_table, u8 total_phases)
{
        int ret;
        u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
        u8 phases_per_row[MAX_PHASES] = {0};
        int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
        int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
        bool phase_0_found = false, phase_15_found = false;
        struct mmc_host *mmc = host->mmc;

        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
        if (!total_phases || (total_phases > MAX_PHASES)) {
                pr_err("%s: %s: invalid argument: total_phases=%d\n",
                        mmc_hostname(mmc), __func__, total_phases);
                return -EINVAL;
        }

        for (cnt = 0; cnt < total_phases; cnt++) {
                ranges[row_index][col_index] = phase_table[cnt];
                phases_per_row[row_index] += 1;
                col_index++;

                if ((cnt + 1) == total_phases) {
                        continue;
                /* check if the next phase in phase_table is consecutive */
                } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
                        row_index++;
                        col_index = 0;
                }
        }

        if (row_index >= MAX_PHASES)
                return -EINVAL;

        /* Check if phase-0 is present in the first valid window */
        if (!ranges[0][0]) {
                phase_0_found = true;
                phase_0_raw_index = 0;
                /* Check if a cycle exists between the two valid windows */
                for (cnt = 1; cnt <= row_index; cnt++) {
                        if (phases_per_row[cnt]) {
                                for (i = 0; i < phases_per_row[cnt]; i++) {
                                        if (ranges[cnt][i] == 15) {
                                                phase_15_found = true;
                                                phase_15_raw_index = cnt;
                                                break;
                                        }
                                }
                        }
                }
        }

        /* If the two valid windows form a cycle, merge them into one window */
        if (phase_0_found && phase_15_found) {
                /* number of phases in the row where phase 0 is present */
                u8 phases_0 = phases_per_row[phase_0_raw_index];
                /* number of phases in the row where phase 15 is present */
                u8 phases_15 = phases_per_row[phase_15_raw_index];

                if (phases_0 + phases_15 >= MAX_PHASES)
                        /*
                         * If there is more than one phase window, the total
                         * number of phases in both windows should not be
                         * more than or equal to MAX_PHASES.
                         */
                        return -EINVAL;

                /* Merge the 2 cyclic windows */
                i = phases_15;
                for (cnt = 0; cnt < phases_0; cnt++) {
                        ranges[phase_15_raw_index][i] =
                                ranges[phase_0_raw_index][cnt];
                        if (++i >= MAX_PHASES)
                                break;
                }

                phases_per_row[phase_0_raw_index] = 0;
                phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
        }

        for (cnt = 0; cnt <= row_index; cnt++) {
                if (phases_per_row[cnt] > curr_max) {
                        curr_max = phases_per_row[cnt];
                        selected_row_index = cnt;
                }
        }

        i = ((curr_max * 3) / 4);
        if (i)
                i--;

        ret = (int)ranges[selected_row_index][i];

        if (ret >= MAX_PHASES) {
                ret = -EINVAL;
                pr_err("%s: %s: invalid phase selected=%d\n",
                        mmc_hostname(mmc), __func__, ret);
        }

        pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
        return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
        u32 mclk_freq = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;

        /* Program the MCLK value to MCLK_FREQ bit field */
        if (host->clock <= 112000000)
                mclk_freq = 0;
        else if (host->clock <= 125000000)
                mclk_freq = 1;
        else if (host->clock <= 137000000)
                mclk_freq = 2;
        else if (host->clock <= 150000000)
                mclk_freq = 3;
        else if (host->clock <= 162000000)
                mclk_freq = 4;
        else if (host->clock <= 175000000)
                mclk_freq = 5;
        else if (host->clock <= 187000000)
                mclk_freq = 6;
        else if (host->clock <= 200000000)
                mclk_freq = 7;

        writel_relaxed(((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG)
                        & ~(7 << 24)) | (mclk_freq << 24)),
                        host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;
        struct mmc_host *mmc = host->mmc;
        int rc = 0;
        unsigned long flags;
        u32 wait_cnt;
        bool prev_pwrsave, curr_pwrsave;

        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
        spin_lock_irqsave(&host->lock, flags);
        prev_pwrsave = !!(readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
        curr_pwrsave = prev_pwrsave;
        /*
         * Make sure that clock is always enabled when DLL
         * tuning is in progress. Keeping PWRSAVE ON may
         * turn off the clock. So let's disable the PWRSAVE
         * here and re-enable it once tuning is completed.
         */
        if (prev_pwrsave) {
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC)
                        & ~CORE_CLK_PWRSAVE), host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC);
                curr_pwrsave = false;
        }

        if (msm_host->use_updated_dll_reset) {
                /* Disable the DLL clock */
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG)
                        & ~CORE_CK_OUT_EN), host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG);

                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2);
        }

        /* Write 1 to DLL_RST bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
        msm_cm_dll_set_freq(host);

        if (msm_host->use_updated_dll_reset) {
                u32 mclk_freq = 0;

                if ((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        & CORE_FLL_CYCLE_CNT))
                        mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
                else
                        mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

                writel_relaxed(((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        & ~(0xFF << 10)) | (mclk_freq << 10)),
                        host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
                /* wait for 5us before enabling DLL clock */
                udelay(5);
        }

        /* Write 0 to DLL_RST bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        if (msm_host->use_updated_dll_reset) {
                msm_cm_dll_set_freq(host);
                /* Enable the DLL clock */
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2)
                        & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
                        msm_host_offset->CORE_DLL_CONFIG_2);
        }

        /* Set DLL_EN bit to 1. */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

        /* Set CK_OUT_EN bit to 1. */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG)
                | CORE_CK_OUT_EN), host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);

        wait_cnt = 50;
        /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
        while (!(readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
                /* max. wait of 50us for the LOCK bit to be set */
                if (--wait_cnt == 0) {
                        pr_err("%s: %s: DLL failed to LOCK\n",
                                mmc_hostname(mmc), __func__);
                        rc = -ETIMEDOUT;
                        goto out;
                }
                /* wait for 1us before polling again */
                udelay(1);
        }

out:
        /* Restore the correct PWRSAVE state */
        if (prev_pwrsave ^ curr_pwrsave) {
                u32 reg = readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC);

                if (prev_pwrsave)
                        reg |= CORE_CLK_PWRSAVE;
                else
                        reg &= ~CORE_CLK_PWRSAVE;

                writel_relaxed(reg, host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC);
        }

        spin_unlock_irqrestore(&host->lock, flags);
        pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
        return rc;
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
        u32 calib_done;
        int ret = 0;
        int cdc_err = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DDR_200_CFG)
                & ~CORE_CDC_T4_DLY_SEL),
                host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

        /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
                & ~CORE_CDC_SWITCH_BYPASS_OFF),
                host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
                | CORE_CDC_SWITCH_RC_EN),
                host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DDR_200_CFG)
                & ~CORE_START_CDC_TRAFFIC),
                host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

        /*
         * Perform CDC Register Initialization Sequence
         *
         * CORE_CSR_CDC_CTLR_CFG0       0x11800EC
         * CORE_CSR_CDC_CTLR_CFG1       0x3011111
         * CORE_CSR_CDC_CAL_TIMER_CFG0  0x1201000
         * CORE_CSR_CDC_CAL_TIMER_CFG1  0x4
         * CORE_CSR_CDC_REFCOUNT_CFG    0xCB732020
         * CORE_CSR_CDC_COARSE_CAL_CFG  0xB19
         * CORE_CSR_CDC_DELAY_CFG       0x3AC
         * CORE_CDC_OFFSET_CFG          0x0
         * CORE_CDC_SLAVE_DDA_CFG       0x16334
         */

        writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
        writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
        writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
        writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
        writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
        writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
        writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

        /* CDC HW Calibration */

        /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
                | CORE_SW_TRIG_FULL_CALIB),
                host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
                & ~CORE_SW_TRIG_FULL_CALIB),
                host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
                | CORE_HW_AUTOCAL_ENA),
                host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
        writel_relaxed((readl_relaxed(host->ioaddr +
                CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
                host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

        mb();

        /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
        ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
                calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CDC Calibration was not completed\n",
                        mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
        cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
                & CORE_CDC_ERROR_CODE_MASK;
        if (cdc_err) {
                pr_err("%s: %s: CDC Error Code %d\n",
                        mmc_hostname(host->mmc), __func__, cdc_err);
                ret = -EINVAL;
                goto out;
        }

        /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DDR_200_CFG)
                | CORE_START_CDC_TRAFFIC),
                host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

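/*
 * DDR DLL (CM_DLL_SDC4) calibration for HS400: reprogram the DDR config,
 * set DDR_CAL_EN and poll for DDR_DLL_LOCK. Called from the HS400
 * calibration path when the CDCLP533 block is not used, and from the
 * enhanced-strobe setup.
 */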
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;
        u32 dll_status, ddr_config;
        int ret = 0;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        /*
         * Reprogram the value in case it might have been modified by
         * bootloaders.
         */
        if (msm_host->rclk_delay_fix) {
                writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
                        msm_host_offset->CORE_DDR_CONFIG_2);
        } else {
                ddr_config = DDR_CONFIG_POR_VAL &
                                ~DDR_CONFIG_PRG_RCLK_DLY_MASK;
                ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
                writel_relaxed(ddr_config, host->ioaddr +
                        msm_host_offset->CORE_DDR_CONFIG);
        }

        if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_DDR_200_CFG)
                        | CORE_CMDIN_RCLK_EN), host->ioaddr +
                        msm_host_offset->CORE_DDR_200_CFG);

        /* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG_2)
                | CORE_DDR_CAL_EN),
                host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

        /* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
        ret = readl_poll_timeout(host->ioaddr +
                msm_host_offset->CORE_DLL_STATUS,
                dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
                        mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /*
         * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
         * When MCLK is gated OFF, it is not gated for less than 0.5us,
         * and MCLK must be switched on for at least 1us before DATA
         * starts coming in. Controllers with 14lpp tech DLL cannot
         * guarantee the above requirement, so PWRSAVE_DLL should not be
         * turned on for host controllers using this DLL.
         */
        if (!msm_host->use_14lpp_dll)
                writel_relaxed((readl_relaxed(host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC3)
                        | CORE_PWRSAVE_DLL), host->ioaddr +
                        msm_host_offset->CORE_VENDOR_SPEC3);
        mb();
out:
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
        int ret = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        struct mmc_host *mmc = host->mmc;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
                pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
                                mmc_hostname(mmc));
                return -EINVAL;
        }

        if (msm_host->calibration_done ||
                !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
                return 0;
        }

        /*
         * Reset the tuning block.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        if (!ret)
                msm_host->calibration_done = true;
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

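/*
 * HS400 DLL calibration: reset the DLL, restore the tuning phase saved
 * during HS200 tuning, then calibrate either the CDCLP533 or the
 * CM_DLL_SDC4 hardware depending on the controller.
 */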
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
        int ret = 0;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        const struct sdhci_msm_offset *msm_host_offset =
                                        msm_host->offset;

        pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        /* Set the selected phase in delay line hw block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
        if (ret)
                goto out;

        /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
        writel_relaxed((readl_relaxed(host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG)
                | CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
                msm_host_offset->CORE_DLL_CONFIG);

        if (msm_host->use_cdclp533)
                /* Calibrate CDCLP533 DLL HW */
                ret = sdhci_msm_cdclp533_calibration(host);
        else
                /* Calibrate CM_DLL_SDC4 HW */
                ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
                __func__, ret);
        return ret;
}

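/*
 * Switch the card's driver strength via CMD6 (MMC_SWITCH) on the HS_TIMING
 * EXT_CSD byte; used by the tuning code below to retry tuning when every
 * phase passes with the current drive strength.
 */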
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
                u8 drv_type)
{
        struct mmc_command cmd = {0};
        struct mmc_request mrq = {NULL};
        struct mmc_host *mmc = host->mmc;
        u8 val = ((drv_type << 4) | 2);

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                (EXT_CSD_HS_TIMING << 16) |
                (val << 8) |
                EXT_CSD_CMD_SET_NORMAL;
        cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
        /* 1 sec */
        cmd.busy_timeout = 1000 * 1000;

        memset(cmd.resp, 0, sizeof(cmd.resp));
        cmd.retries = 3;

        mrq.cmd = &cmd;
        cmd.data = NULL;

        mmc_wait_for_req(mmc, &mrq);
        pr_debug("%s: %s: set card drive type to %d\n",
                        mmc_hostname(mmc), __func__,
                        drv_type);
}

int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
        unsigned long flags;
        int tuning_seq_cnt = 3;
        u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
        const u32 *tuning_block_pattern = tuning_block_64;
        int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
        int rc;
        struct mmc_host *mmc = host->mmc;
        struct mmc_ios ios = host->mmc->ios;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = pltfm_host->priv;
        u8 drv_type = 0;
        bool drv_type_changed = false;
        struct mmc_card *card = host->mmc->card;
        int sts_retry;

        /*
         * Tuning is required for SDR104, HS200 and HS400 cards, and only
         * if the clock frequency is greater than 100MHz in these modes.
         */
        if (host->clock <= CORE_FREQ_100MHZ ||
                !((ios.timing == MMC_TIMING_MMC_HS400) ||
                (ios.timing == MMC_TIMING_MMC_HS200) ||
                (ios.timing == MMC_TIMING_UHS_SDR104)))
                return 0;

        /*
         * Don't allow re-tuning for CRC errors observed for any commands
         * that are sent during the tuning sequence itself.
         */
        if (msm_host->tuning_in_progress)
                return 0;
        msm_host->tuning_in_progress = true;
        pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

        /* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
        if (msm_host->tuning_done && !msm_host->calibration_done &&
                (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
                rc = sdhci_msm_hs400_dll_calibration(host);
                spin_lock_irqsave(&host->lock, flags);
                if (!rc)
                        msm_host->calibration_done = true;
                spin_unlock_irqrestore(&host->lock, flags);
                goto out;
        }

        spin_lock_irqsave(&host->lock, flags);

        if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
                (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
                tuning_block_pattern = tuning_block_128;
                size = sizeof(tuning_block_128);
        }
        spin_unlock_irqrestore(&host->lock, flags);

        data_buf = kmalloc(size, GFP_KERNEL);
        if (!data_buf) {
                rc = -ENOMEM;
                goto out;
        }

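        /*
         * Try the tuning pattern at each of the 16 DLL output phases, record
         * the phases that return the pattern intact, and finally program the
         * phase at the 3/4 point of the largest passing window.
         */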
retry:
        tuned_phase_cnt = 0;

        /* first of all reset the tuning block */
        rc = msm_init_cm_dll(host);
        if (rc)
                goto kfree;

        phase = 0;
        do {
                struct mmc_command cmd = {0};
                struct mmc_data data = {0};
                struct mmc_request mrq = {
                        .cmd = &cmd,
                        .data = &data
                };
                struct scatterlist sg;
                struct mmc_command sts_cmd = {0};

                /* set the phase in delay line hw block */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        goto kfree;

                cmd.opcode = opcode;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

                data.blksz = size;
                data.blocks = 1;
                data.flags = MMC_DATA_READ;
                data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

                data.sg = &sg;
                data.sg_len = 1;
                sg_init_one(&sg, data_buf, size);
                memset(data_buf, 0, size);
                mmc_wait_for_req(mmc, &mrq);

                if (card && (cmd.error || data.error)) {
                        sts_cmd.opcode = MMC_SEND_STATUS;
                        sts_cmd.arg = card->rca << 16;
                        sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                        sts_retry = 5;
                        while (sts_retry) {
                                mmc_wait_for_cmd(mmc, &sts_cmd, 0);

                                if (sts_cmd.error ||
                                    (R1_CURRENT_STATE(sts_cmd.resp[0])
                                    != R1_STATE_TRAN)) {
                                        sts_retry--;
                                        /*
                                         * Wait for at least 146 MCLK cycles
                                         * for the card to move to the TRANS
                                         * state. As the MCLK would be at
                                         * least 200MHz for tuning, we need
                                         * at most a 0.73us delay. To be on
                                         * the safer side, a 1ms delay is
                                         * given.
                                         */
                                        usleep_range(1000, 1200);
                                        pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
                                                mmc_hostname(mmc), phase,
                                                sts_cmd.error, sts_cmd.resp[0]);
                                        continue;
                                }
                                break;
                        };
                }

                if (!cmd.error && !data.error &&
                        !memcmp(data_buf, tuning_block_pattern, size)) {
                        /* tuning is successful at this tuning point */
                        tuned_phases[tuned_phase_cnt++] = phase;
                        pr_debug("%s: %s: found *** good *** phase = %d\n",
                                mmc_hostname(mmc), __func__, phase);
                } else {
                        pr_debug("%s: %s: found ## bad ## phase = %d\n",
                                mmc_hostname(mmc), __func__, phase);
                }
        } while (++phase < 16);

        if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
                        card && mmc_card_mmc(card)) {
                /*
                 * If all phases pass then it's a problem. So change the
                 * card's drive type to a different value, if supported, and
                 * repeat tuning until at least one phase fails. Then set the
                 * original drive type back.
                 *
                 * If all the phases still pass after trying all possible
                 * drive types, then one of those 16 phases will be picked.
                 * This is no different from what was going on before the
                 * modification to change drive type and retune.
                 */
                pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
                                tuned_phase_cnt);

                /* set drive type to another value; default setting is 0x0 */
                while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
                        pr_debug("%s: trying different drive strength (%d)\n",
                                mmc_hostname(mmc), drv_type);
                        if (card->ext_csd.raw_driver_strength &
                                        (1 << drv_type)) {
                                sdhci_msm_set_mmc_drv_type(host, opcode,
                                                drv_type);
                                if (!drv_type_changed)
                                        drv_type_changed = true;
                                goto retry;
                        }
                }
        }

        /* reset drive type to default (50 ohm) if changed */
        if (drv_type_changed)
                sdhci_msm_set_mmc_drv_type(host, opcode, 0);

        if (tuned_phase_cnt) {
                rc = msm_find_most_appropriate_phase(host, tuned_phases,
                                                        tuned_phase_cnt);
                if (rc < 0)
                        goto kfree;
                else
                        phase = (u8)rc;

                /*
                 * Finally set the selected phase in delay
                 * line hw block.
                 */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        goto kfree;
                msm_host->saved_tuning_phase = phase;
                pr_debug("%s: %s: finally setting the tuning phase to %d\n",
                                mmc_hostname(mmc), __func__, phase);
        } else {
                if (--tuning_seq_cnt)
                        goto retry;
                /* tuning failed */
                pr_err("%s: %s: no tuning point found\n",
                        mmc_hostname(mmc), __func__);
                rc = -EIO;
        }

kfree:
        kfree(data_buf);
out:
        spin_lock_irqsave(&host->lock, flags);
        if (!rc)
                msm_host->tuning_done = true;
        spin_unlock_irqrestore(&host->lock, flags);
        msm_host->tuning_in_progress = false;
        pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
        return rc;
}

static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
        struct sdhci_msm_gpio_data *curr;
        int i, ret = 0;

        curr = pdata->pin_data->gpio_data;
        for (i = 0; i < curr->size; i++) {
                if (!gpio_is_valid(curr->gpio[i].no)) {
                        ret = -EINVAL;
                        pr_err("%s: Invalid gpio = %d\n", __func__,
                                        curr->gpio[i].no);
                        goto free_gpios;
                }
                if (enable) {
                        ret = gpio_request(curr->gpio[i].no,
                                        curr->gpio[i].name);
                        if (ret) {
                                pr_err("%s: gpio_request(%d, %s) failed %d\n",
                                        __func__, curr->gpio[i].no,
                                        curr->gpio[i].name, ret);
                                goto free_gpios;
                        }
                        curr->gpio[i].is_enabled = true;
                } else {
                        gpio_free(curr->gpio[i].no);
                        curr->gpio[i].is_enabled = false;
                }
        }
        return ret;

free_gpios:
        for (i--; i >= 0; i--) {
                gpio_free(curr->gpio[i].no);
                curr->gpio[i].is_enabled = false;
        }
        return ret;
}

Pratibhasagar V9acf2642013-11-21 21:07:21 +05301383static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1384 bool enable)
1385{
1386 int ret = 0;
1387
1388 if (enable)
1389 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1390 pdata->pctrl_data->pins_active);
1391 else
1392 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1393 pdata->pctrl_data->pins_sleep);
1394
1395 if (ret < 0)
1396 pr_err("%s state for pinctrl failed with %d\n",
1397 enable ? "Enabling" : "Disabling", ret);
1398
1399 return ret;
1400}
1401
Asutosh Das0ef24812012-12-18 16:14:02 +05301402static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1403{
1404 int ret = 0;
1405
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301406 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301407 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301408 } else if (pdata->pctrl_data) {
1409 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1410 goto out;
1411 } else if (!pdata->pin_data) {
1412 return 0;
1413 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301414
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301415 if (pdata->pin_data->is_gpio)
1416 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301417out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301418 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301419 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301420
1421 return ret;
1422}
1423
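/*
 * Helper to read a u32 array property from the host's device-tree node
 * into a devm-allocated buffer. For example, the supported controller
 * clock rates are described as a flat u32 list (values below are purely
 * illustrative):
 *
 *	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
 *
 * A non-zero @size acts as an upper bound on the number of elements
 * accepted.
 */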
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301424static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1425 u32 **out, int *len, u32 size)
1426{
1427 int ret = 0;
1428 struct device_node *np = dev->of_node;
1429 size_t sz;
1430 u32 *arr = NULL;
1431
1432 if (!of_get_property(np, prop_name, len)) {
1433 ret = -EINVAL;
1434 goto out;
1435 }
1436 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001437 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301438 dev_err(dev, "%s invalid size\n", prop_name);
1439 ret = -EINVAL;
1440 goto out;
1441 }
1442
1443 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1444 if (!arr) {
1445 dev_err(dev, "%s failed allocating memory\n", prop_name);
1446 ret = -ENOMEM;
1447 goto out;
1448 }
1449
1450 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1451 if (ret < 0) {
1452 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1453 goto out;
1454 }
1455 *out = arr;
1456out:
1457 if (ret)
1458 *len = 0;
1459 return ret;
1460}
1461
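/*
 * Regulator data for the "vdd" and "vdd-io" supplies is described with
 * the properties parsed below. A sketch of a typical set (the phandle and
 * the numeric values are illustrative, not taken from any real board
 * file):
 *
 *	vdd-supply = <&pm8941_l20>;
 *	qcom,vdd-voltage-level = <2950000 2950000>;
 *	qcom,vdd-current-level = <200 800000>;
 *	qcom,vdd-io-lpm-sup;
 *	qcom,vdd-io-always-on;
 *
 * The voltage-level pair is <low high> in uV and the current-level pair
 * is <lpm hpm> in uA, filling low_vol_level/high_vol_level and
 * lpm_uA/hpm_uA respectively.
 */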
Asutosh Das0ef24812012-12-18 16:14:02 +05301462#define MAX_PROP_SIZE 32
1463static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1464 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1465{
1466 int len, ret = 0;
1467 const __be32 *prop;
1468 char prop_name[MAX_PROP_SIZE];
1469 struct sdhci_msm_reg_data *vreg;
1470 struct device_node *np = dev->of_node;
1471
1472 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1473 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301474 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301475 return ret;
1476 }
1477
1478 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1479 if (!vreg) {
1480 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1481 ret = -ENOMEM;
1482 return ret;
1483 }
1484
1485 vreg->name = vreg_name;
1486
1487 snprintf(prop_name, MAX_PROP_SIZE,
1488 "qcom,%s-always-on", vreg_name);
1489 if (of_get_property(np, prop_name, NULL))
1490 vreg->is_always_on = true;
1491
1492 snprintf(prop_name, MAX_PROP_SIZE,
1493 "qcom,%s-lpm-sup", vreg_name);
1494 if (of_get_property(np, prop_name, NULL))
1495 vreg->lpm_sup = true;
1496
1497 snprintf(prop_name, MAX_PROP_SIZE,
1498 "qcom,%s-voltage-level", vreg_name);
1499 prop = of_get_property(np, prop_name, &len);
1500 if (!prop || (len != (2 * sizeof(__be32)))) {
1501 dev_warn(dev, "%s %s property\n",
1502 prop ? "invalid format" : "no", prop_name);
1503 } else {
1504 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1505 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1506 }
1507
1508 snprintf(prop_name, MAX_PROP_SIZE,
1509 "qcom,%s-current-level", vreg_name);
1510 prop = of_get_property(np, prop_name, &len);
1511 if (!prop || (len != (2 * sizeof(__be32)))) {
1512 dev_warn(dev, "%s %s property\n",
1513 prop ? "invalid format" : "no", prop_name);
1514 } else {
1515 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1516 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1517 }
1518
1519 *vreg_data = vreg;
1520 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1521 vreg->name, vreg->is_always_on ? "always_on," : "",
1522 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1523 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1524
1525 return ret;
1526}
1527
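/*
 * Pin configuration is preferably described through the pinctrl
 * framework; this helper expects two named states, "active" and "sleep",
 * e.g. (the pin group labels are illustrative):
 *
 *	pinctrl-names = "active", "sleep";
 *	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
 *	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 *
 * sdhci_msm_setup_pinctrl() later selects the matching state whenever the
 * pins are enabled or disabled.
 */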
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301528static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1529 struct sdhci_msm_pltfm_data *pdata)
1530{
1531 struct sdhci_pinctrl_data *pctrl_data;
1532 struct pinctrl *pctrl;
1533 int ret = 0;
1534
1535 /* Try to obtain pinctrl handle */
1536 pctrl = devm_pinctrl_get(dev);
1537 if (IS_ERR(pctrl)) {
1538 ret = PTR_ERR(pctrl);
1539 goto out;
1540 }
1541 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1542 if (!pctrl_data) {
1543 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1544 ret = -ENOMEM;
1545 goto out;
1546 }
1547 pctrl_data->pctrl = pctrl;
1548 /* Look-up and keep the states handy to be used later */
1549 pctrl_data->pins_active = pinctrl_lookup_state(
1550 pctrl_data->pctrl, "active");
1551 if (IS_ERR(pctrl_data->pins_active)) {
1552 ret = PTR_ERR(pctrl_data->pins_active);
1553 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1554 goto out;
1555 }
1556 pctrl_data->pins_sleep = pinctrl_lookup_state(
1557 pctrl_data->pctrl, "sleep");
1558 if (IS_ERR(pctrl_data->pins_sleep)) {
1559 ret = PTR_ERR(pctrl_data->pins_sleep);
1560 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1561 goto out;
1562 }
1563 pdata->pctrl_data = pctrl_data;
1564out:
1565 return ret;
1566}
1567
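/*
 * Legacy fallback used when no pinctrl description is available: the pins
 * are listed as plain GPIOs together with matching names, e.g. (the GPIO
 * controller phandle and numbers are illustrative):
 *
 *	gpios = <&msmgpio 40 0>, <&msmgpio 39 0>;
 *	qcom,gpio-names = "CLK", "CMD";
 *
 * Each entry is later claimed with gpio_request() in
 * sdhci_msm_setup_gpio() when the pins are enabled.
 */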
Asutosh Das0ef24812012-12-18 16:14:02 +05301568#define GPIO_NAME_MAX_LEN 32
1569static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1570 struct sdhci_msm_pltfm_data *pdata)
1571{
1572 int ret = 0, cnt, i;
1573 struct sdhci_msm_pin_data *pin_data;
1574 struct device_node *np = dev->of_node;
1575
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301576 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1577 if (!ret) {
1578 goto out;
1579 } else if (ret == -EPROBE_DEFER) {
1580 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1581 goto out;
1582 } else {
1583 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1584 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301585 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301586 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301587 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1588 if (!pin_data) {
1589 dev_err(dev, "No memory for pin_data\n");
1590 ret = -ENOMEM;
1591 goto out;
1592 }
1593
1594 cnt = of_gpio_count(np);
1595 if (cnt > 0) {
1596 pin_data->gpio_data = devm_kzalloc(dev,
1597 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1598 if (!pin_data->gpio_data) {
1599 dev_err(dev, "No memory for gpio_data\n");
1600 ret = -ENOMEM;
1601 goto out;
1602 }
1603 pin_data->gpio_data->size = cnt;
1604 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1605 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1606
1607 if (!pin_data->gpio_data->gpio) {
1608 dev_err(dev, "No memory for gpio\n");
1609 ret = -ENOMEM;
1610 goto out;
1611 }
1612
1613 for (i = 0; i < cnt; i++) {
1614 const char *name = NULL;
1615 char result[GPIO_NAME_MAX_LEN];
1616 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1617 of_property_read_string_index(np,
1618 "qcom,gpio-names", i, &name);
1619
1620 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1621 dev_name(dev), name ? name : "?");
1622 pin_data->gpio_data->gpio[i].name = result;
1623 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1624 pin_data->gpio_data->gpio[i].name,
1625 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301626 }
1627 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301628 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301629out:
1630 if (ret)
1631 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1632 return ret;
1633}
1634
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001635#ifdef CONFIG_SMP
1636static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1637{
1638 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1639}
1640#else
1641static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1642#endif
1643
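/*
 * PM QoS IRQ voting configuration. A sketch of the properties consumed
 * below (the latency values are illustrative; the list must carry one
 * entry per power policy, i.e. SDHCI_POWER_POLICY_NUM entries, two of
 * which are shown here):
 *
 *	qcom,pm-qos-irq-cpu = <0>;
 *	qcom,pm-qos-irq-latency = <500 200>;
 *
 * Setting qcom,pm-qos-irq-type = "affine_irq" switches to
 * PM_QOS_REQ_AFFINE_IRQ on SMP builds, in which case no CPU needs to be
 * specified.
 */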
Gilad Bronerc788a672015-09-08 15:39:11 +03001644static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1645 struct sdhci_msm_pltfm_data *pdata)
1646{
1647 struct device_node *np = dev->of_node;
1648 const char *str;
1649 u32 cpu;
1650 int ret = 0;
1651 int i;
1652
1653 pdata->pm_qos_data.irq_valid = false;
1654 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1655 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1656 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001657 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001658 }
1659
1660 /* must specify cpu for "affine_cores" type */
1661 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1662 pdata->pm_qos_data.irq_cpu = -1;
1663 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1664 if (ret) {
1665 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1666 ret);
1667 goto out;
1668 }
1669 if (cpu < 0 || cpu >= num_possible_cpus()) {
1670 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1671 __func__, cpu, num_possible_cpus());
1672 ret = -EINVAL;
1673 goto out;
1674 }
1675 pdata->pm_qos_data.irq_cpu = cpu;
1676 }
1677
1678 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1679 SDHCI_POWER_POLICY_NUM) {
1680 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1681 __func__, SDHCI_POWER_POLICY_NUM);
1682 ret = -EINVAL;
1683 goto out;
1684 }
1685
1686 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1687 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1688 &pdata->pm_qos_data.irq_latency.latency[i]);
1689
1690 pdata->pm_qos_data.irq_valid = true;
1691out:
1692 return ret;
1693}
1694
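/*
 * PM QoS CPU-group voting configuration. CPU groups are given as bit
 * masks of possible CPUs, and each group carries one latency value per
 * power policy in the flat lists read by sdhci_msm_pm_qos_parse_latency().
 * A sketch with two groups, assuming two power policies (all values are
 * illustrative):
 *
 *	qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *	qcom,pm-qos-legacy-latency-us = <10 100>, <10 100>;
 *	qcom,pm-qos-cmdq-latency-us = <10 100>, <10 100>;
 */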
1695static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1696 struct sdhci_msm_pltfm_data *pdata)
1697{
1698 struct device_node *np = dev->of_node;
1699 u32 mask;
1700 int nr_groups;
1701 int ret;
1702 int i;
1703
1704 /* Read cpu group mapping */
1705 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1706 if (nr_groups <= 0) {
1707 ret = -EINVAL;
1708 goto out;
1709 }
1710 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1711 pdata->pm_qos_data.cpu_group_map.mask =
1712 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1713 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1714 ret = -ENOMEM;
1715 goto out;
1716 }
1717
1718 for (i = 0; i < nr_groups; i++) {
1719 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1720 i, &mask);
1721
1722 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1723 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1724 cpu_possible_mask)) {
1725 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1726 __func__, mask, i);
1727 ret = -EINVAL;
1728 goto free_res;
1729 }
1730 }
1731 return 0;
1732
1733free_res:
1734 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1735out:
1736 return ret;
1737}
1738
1739static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1740 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1741{
1742 struct device_node *np = dev->of_node;
1743 struct sdhci_msm_pm_qos_latency *values;
1744 int ret;
1745 int i;
1746 int group;
1747 int cfg;
1748
1749 ret = of_property_count_u32_elems(np, name);
1750 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1751 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1752 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1753 ret);
1754 return -EINVAL;
1755 } else if (ret < 0) {
1756 return ret;
1757 }
1758
1759 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1760 GFP_KERNEL);
1761 if (!values)
1762 return -ENOMEM;
1763
1764 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1765 group = i / SDHCI_POWER_POLICY_NUM;
1766 cfg = i % SDHCI_POWER_POLICY_NUM;
1767 of_property_read_u32_index(np, name, i,
1768 &(values[group].latency[cfg]));
1769 }
1770
1771 *latency = values;
1772 return 0;
1773}
1774
1775static void sdhci_msm_pm_qos_parse(struct device *dev,
1776 struct sdhci_msm_pltfm_data *pdata)
1777{
1778 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1779 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1780 __func__);
1781
1782 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1783 pdata->pm_qos_data.cmdq_valid =
1784 !sdhci_msm_pm_qos_parse_latency(dev,
1785 "qcom,pm-qos-cmdq-latency-us",
1786 pdata->pm_qos_data.cpu_group_map.nr_groups,
1787 &pdata->pm_qos_data.cmdq_latency);
1788 pdata->pm_qos_data.legacy_valid =
1789 !sdhci_msm_pm_qos_parse_latency(dev,
1790 "qcom,pm-qos-legacy-latency-us",
1791 pdata->pm_qos_data.cpu_group_map.nr_groups,
1792 &pdata->pm_qos_data.latency);
1793 if (!pdata->pm_qos_data.cmdq_valid &&
1794 !pdata->pm_qos_data.legacy_valid) {
1795 /* clean-up previously allocated arrays */
1796 kfree(pdata->pm_qos_data.latency);
1797 kfree(pdata->pm_qos_data.cmdq_latency);
1798 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1799 __func__);
1800 }
1801 } else {
1802 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1803 __func__);
1804 }
1805}
1806
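/*
 * The remaining per-slot properties are read directly in
 * sdhci_msm_populate_pdata() below. A sketch of the most common ones (all
 * values are illustrative):
 *
 *	qcom,bus-width = <8>;
 *	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
 *	qcom,nonremovable;
 *
 * The accepted bus-speed-mode strings are HS400_1p8v, HS400_1p2v,
 * HS200_1p8v, HS200_1p2v, DDR_1p8v and DDR_1p2v, each mapping to the
 * corresponding MMC_CAP/MMC_CAP2 flag.
 */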
Asutosh Das0ef24812012-12-18 16:14:02 +05301807/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001808static
1809struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1810 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301811{
1812 struct sdhci_msm_pltfm_data *pdata = NULL;
1813 struct device_node *np = dev->of_node;
1814 u32 bus_width = 0;
1815 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301816 int clk_table_len;
1817 u32 *clk_table = NULL;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301818 int ice_clk_table_len;
1819 u32 *ice_clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301820 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301821 const char *lower_bus_speed = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301822
1823 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1824 if (!pdata) {
1825 dev_err(dev, "failed to allocate memory for platform data\n");
1826 goto out;
1827 }
1828
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301829 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
Bao D. Nguyen0f5ac952017-06-14 12:42:41 -07001830 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301831 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301832
Asutosh Das0ef24812012-12-18 16:14:02 +05301833 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1834 if (bus_width == 8)
1835 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1836 else if (bus_width == 4)
1837 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1838 else {
1839 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1840 pdata->mmc_bus_width = 0;
1841 }
1842
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001843 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301844 &msm_host->mmc->clk_scaling.pltfm_freq_table,
1845 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001846 pr_debug("%s: no clock scaling frequencies were supplied\n",
1847 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301848 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
1849 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
1850 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001851
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301852 /*
 1853	 * A few hosts can support DDR52 mode at the same lower
1854 * system voltage corner as high-speed mode. In such cases,
1855 * it is always better to put it in DDR mode which will
1856 * improve the performance without any power impact.
1857 */
1858 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
1859 &lower_bus_speed)) {
1860 if (!strcmp(lower_bus_speed, "DDR52"))
1861 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
1862 MMC_SCALING_LOWER_DDR52_MODE;
1863 }
1864
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301865 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1866 &clk_table, &clk_table_len, 0)) {
1867 dev_err(dev, "failed parsing supported clock rates\n");
1868 goto out;
1869 }
1870 if (!clk_table || !clk_table_len) {
1871 dev_err(dev, "Invalid clock table\n");
1872 goto out;
1873 }
1874 pdata->sup_clk_table = clk_table;
1875 pdata->sup_clk_cnt = clk_table_len;
1876
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301877 if (msm_host->ice.pdev) {
1878 if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
1879 &ice_clk_table, &ice_clk_table_len, 0)) {
1880 dev_err(dev, "failed parsing supported ice clock rates\n");
1881 goto out;
1882 }
1883 if (!ice_clk_table || !ice_clk_table_len) {
1884 dev_err(dev, "Invalid clock table\n");
1885 goto out;
1886 }
1887 pdata->sup_ice_clk_table = ice_clk_table;
1888 pdata->sup_ice_clk_cnt = ice_clk_table_len;
1889 }
1890
Asutosh Das0ef24812012-12-18 16:14:02 +05301891 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1892 sdhci_msm_slot_reg_data),
1893 GFP_KERNEL);
1894 if (!pdata->vreg_data) {
1895 dev_err(dev, "failed to allocate memory for vreg data\n");
1896 goto out;
1897 }
1898
1899 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1900 "vdd")) {
1901 dev_err(dev, "failed parsing vdd data\n");
1902 goto out;
1903 }
1904 if (sdhci_msm_dt_parse_vreg_info(dev,
1905 &pdata->vreg_data->vdd_io_data,
1906 "vdd-io")) {
1907 dev_err(dev, "failed parsing vdd-io data\n");
1908 goto out;
1909 }
1910
1911 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1912 dev_err(dev, "failed parsing gpio data\n");
1913 goto out;
1914 }
1915
Asutosh Das0ef24812012-12-18 16:14:02 +05301916 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1917
1918 for (i = 0; i < len; i++) {
1919 const char *name = NULL;
1920
1921 of_property_read_string_index(np,
1922 "qcom,bus-speed-mode", i, &name);
1923 if (!name)
1924 continue;
1925
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001926 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1927 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1928 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1929 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1930 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301931 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1932 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1933 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1934 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1935 pdata->caps |= MMC_CAP_1_8V_DDR
1936 | MMC_CAP_UHS_DDR50;
1937 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1938 pdata->caps |= MMC_CAP_1_2V_DDR
1939 | MMC_CAP_UHS_DDR50;
1940 }
1941
1942 if (of_get_property(np, "qcom,nonremovable", NULL))
1943 pdata->nonremovable = true;
1944
Guoping Yuf7c91332014-08-20 16:56:18 +08001945 if (of_get_property(np, "qcom,nonhotplug", NULL))
1946 pdata->nonhotplug = true;
1947
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001948 pdata->largeaddressbus =
1949 of_property_read_bool(np, "qcom,large-address-bus");
1950
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001951 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1952 msm_host->mmc->wakeup_on_idle = true;
1953
Gilad Bronerc788a672015-09-08 15:39:11 +03001954 sdhci_msm_pm_qos_parse(dev, pdata);
1955
Pavan Anamula5a256df2015-10-16 14:38:28 +05301956 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05301957 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05301958
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07001959 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07001960 msm_host->regs_restore.is_supported =
1961 of_property_read_bool(np, "qcom,restore-after-cx-collapse");
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07001962
Asutosh Das0ef24812012-12-18 16:14:02 +05301963 return pdata;
1964out:
1965 return NULL;
1966}
1967
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301968/* Returns required bandwidth in Bytes per Sec */
1969static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1970 struct mmc_ios *ios)
1971{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301972 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1973 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1974
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301975 unsigned int bw;
1976
Sahitya Tummala2886c922013-04-03 18:03:31 +05301977 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301978 /*
 1979	 * For DDR mode, the SDCC controller clock will be at
 1980	 * double the rate of the actual clock that goes to the card.
1981 */
1982 if (ios->bus_width == MMC_BUS_WIDTH_4)
1983 bw /= 2;
1984 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1985 bw /= 8;
1986
1987 return bw;
1988}
1989
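/*
 * Map a required bandwidth (bytes per second) to an index into the
 * bw_vecs[] table parsed from "qcom,bus-bw-vectors-bps": the index of the
 * first entry that is >= the requested bandwidth becomes the bus vote.
 * Worked example with an illustrative table {0, 6144000, 13107200,
 * 26214400}: a request of 10000000 B/s selects index 2, while anything
 * above the last entry clamps to the last index. When the max_bus_bw
 * sysfs override is set, the cached maximum vote is returned instead.
 */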
1990static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1991 unsigned int bw)
1992{
1993 unsigned int *table = host->pdata->voting_data->bw_vecs;
1994 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1995 int i;
1996
1997 if (host->msm_bus_vote.is_max_bw_needed && bw)
1998 return host->msm_bus_vote.max_bw_vote;
1999
2000 for (i = 0; i < size; i++) {
2001 if (bw <= table[i])
2002 break;
2003 }
2004
2005 if (i && (i == size))
2006 i--;
2007
2008 return i;
2009}
2010
2011/*
2012 * This function must be called with host lock acquired.
2013 * Caller of this function should also ensure that msm bus client
2014 * handle is not null.
2015 */
2016static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
2017 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302018 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302019{
2020 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2021 int rc = 0;
2022
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302023 BUG_ON(!flags);
2024
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302025 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302026 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302027 rc = msm_bus_scale_client_update_request(
2028 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302029 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302030 if (rc) {
2031 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2032 mmc_hostname(host->mmc),
2033 msm_host->msm_bus_vote.client_handle, vote, rc);
2034 goto out;
2035 }
2036 msm_host->msm_bus_vote.curr_vote = vote;
2037 }
2038out:
2039 return rc;
2040}
2041
2042/*
2043 * Internal work. Work to set 0 bandwidth for msm bus.
2044 */
2045static void sdhci_msm_bus_work(struct work_struct *work)
2046{
2047 struct sdhci_msm_host *msm_host;
2048 struct sdhci_host *host;
2049 unsigned long flags;
2050
2051 msm_host = container_of(work, struct sdhci_msm_host,
2052 msm_bus_vote.vote_work.work);
2053 host = platform_get_drvdata(msm_host->pdev);
2054
2055 if (!msm_host->msm_bus_vote.client_handle)
2056 return;
2057
2058 spin_lock_irqsave(&host->lock, flags);
2059 /* don't vote for 0 bandwidth if any request is in progress */
2060 if (!host->mrq) {
2061 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302062 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302063 } else
2064 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
2065 mmc_hostname(host->mmc), __func__);
2066 spin_unlock_irqrestore(&host->lock, flags);
2067}
2068
2069/*
2070 * This function cancels any scheduled delayed work and sets the bus
2071 * vote based on bw (bandwidth) argument.
2072 */
2073static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2074 unsigned int bw)
2075{
2076 int vote;
2077 unsigned long flags;
2078 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2079 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2080
2081 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2082 spin_lock_irqsave(&host->lock, flags);
2083 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302084 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302085 spin_unlock_irqrestore(&host->lock, flags);
2086}
2087
2088#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2089
 2090/* This function queues work to set the bandwidth requirement to 0 */
2091static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2092{
2093 unsigned long flags;
2094 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2095 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2096
2097 spin_lock_irqsave(&host->lock, flags);
2098 if (msm_host->msm_bus_vote.min_bw_vote !=
2099 msm_host->msm_bus_vote.curr_vote)
2100 queue_delayed_work(system_wq,
2101 &msm_host->msm_bus_vote.vote_work,
2102 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2103 spin_unlock_irqrestore(&host->lock, flags);
2104}
2105
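/*
 * Register the msm-bus scaling client for this host. The bandwidth
 * thresholds used by sdhci_msm_bus_get_vote_for_bw() come from the DT
 * node, e.g. (values are illustrative and would typically provide one
 * entry per bus-scaling use case defined for the node):
 *
 *	qcom,bus-bw-vectors-bps = <0 400000 25000000 50000000 100000000
 *			200000000 4294967295>;
 */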
2106static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2107 struct platform_device *pdev)
2108{
2109 int rc = 0;
2110 struct msm_bus_scale_pdata *bus_pdata;
2111
2112 struct sdhci_msm_bus_voting_data *data;
2113 struct device *dev = &pdev->dev;
2114
2115 data = devm_kzalloc(dev,
2116 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2117 if (!data) {
2118 dev_err(&pdev->dev,
2119 "%s: failed to allocate memory\n", __func__);
2120 rc = -ENOMEM;
2121 goto out;
2122 }
2123 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2124 if (data->bus_pdata) {
2125 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2126 &data->bw_vecs, &data->bw_vecs_size, 0);
2127 if (rc) {
2128 dev_err(&pdev->dev,
2129 "%s: Failed to get bus-bw-vectors-bps\n",
2130 __func__);
2131 goto out;
2132 }
2133 host->pdata->voting_data = data;
2134 }
2135 if (host->pdata->voting_data &&
2136 host->pdata->voting_data->bus_pdata &&
2137 host->pdata->voting_data->bw_vecs &&
2138 host->pdata->voting_data->bw_vecs_size) {
2139
2140 bus_pdata = host->pdata->voting_data->bus_pdata;
2141 host->msm_bus_vote.client_handle =
2142 msm_bus_scale_register_client(bus_pdata);
2143 if (!host->msm_bus_vote.client_handle) {
 2144			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2145 rc = -EFAULT;
2146 goto out;
2147 }
2148 /* cache the vote index for minimum and maximum bandwidth */
2149 host->msm_bus_vote.min_bw_vote =
2150 sdhci_msm_bus_get_vote_for_bw(host, 0);
2151 host->msm_bus_vote.max_bw_vote =
2152 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2153 } else {
2154 devm_kfree(dev, data);
2155 }
2156
2157out:
2158 return rc;
2159}
2160
2161static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2162{
2163 if (host->msm_bus_vote.client_handle)
2164 msm_bus_scale_unregister_client(
2165 host->msm_bus_vote.client_handle);
2166}
2167
2168static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2169{
2170 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2171 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2172 struct mmc_ios *ios = &host->mmc->ios;
2173 unsigned int bw;
2174
2175 if (!msm_host->msm_bus_vote.client_handle)
2176 return;
2177
2178 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302179 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302180 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302181 } else {
2182 /*
2183 * If clock gating is enabled, then remove the vote
2184 * immediately because clocks will be disabled only
2185 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2186 * additional delay is required to remove the bus vote.
2187 */
2188#ifdef CONFIG_MMC_CLKGATE
2189 if (host->mmc->clkgate_delay)
2190 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2191 else
2192#endif
2193 sdhci_msm_bus_queue_work(host);
2194 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302195}
2196
Asutosh Das0ef24812012-12-18 16:14:02 +05302197/* Regulator utility functions */
2198static int sdhci_msm_vreg_init_reg(struct device *dev,
2199 struct sdhci_msm_reg_data *vreg)
2200{
2201 int ret = 0;
2202
 2203	/* check if regulator is already initialized */
2204 if (vreg->reg)
2205 goto out;
2206
2207 /* Get the regulator handle */
2208 vreg->reg = devm_regulator_get(dev, vreg->name);
2209 if (IS_ERR(vreg->reg)) {
2210 ret = PTR_ERR(vreg->reg);
2211 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2212 __func__, vreg->name, ret);
2213 goto out;
2214 }
2215
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302216 if (regulator_count_voltages(vreg->reg) > 0) {
2217 vreg->set_voltage_sup = true;
2218 /* sanity check */
2219 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2220 pr_err("%s: %s invalid constraints specified\n",
2221 __func__, vreg->name);
2222 ret = -EINVAL;
2223 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302224 }
2225
2226out:
2227 return ret;
2228}
2229
2230static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2231{
2232 if (vreg->reg)
2233 devm_regulator_put(vreg->reg);
2234}
2235
2236static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2237 *vreg, int uA_load)
2238{
2239 int ret = 0;
2240
2241 /*
2242 * regulators that do not support regulator_set_voltage also
2243 * do not support regulator_set_optimum_mode
2244 */
2245 if (vreg->set_voltage_sup) {
2246 ret = regulator_set_load(vreg->reg, uA_load);
2247 if (ret < 0)
2248 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2249 __func__, vreg->name, uA_load, ret);
2250 else
2251 /*
2252 * regulator_set_load() can return non zero
2253 * value even for success case.
2254 */
2255 ret = 0;
2256 }
2257 return ret;
2258}
2259
2260static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2261 int min_uV, int max_uV)
2262{
2263 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302264 if (vreg->set_voltage_sup) {
2265 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2266 if (ret) {
2267 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302268 __func__, vreg->name, min_uV, max_uV, ret);
2269 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302270 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302271
2272 return ret;
2273}
2274
2275static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2276{
2277 int ret = 0;
2278
2279 /* Put regulator in HPM (high power mode) */
2280 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2281 if (ret < 0)
2282 return ret;
2283
2284 if (!vreg->is_enabled) {
2285 /* Set voltage level */
2286 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2287 vreg->high_vol_level);
2288 if (ret)
2289 return ret;
2290 }
2291 ret = regulator_enable(vreg->reg);
2292 if (ret) {
2293 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2294 __func__, vreg->name, ret);
2295 return ret;
2296 }
2297 vreg->is_enabled = true;
2298 return ret;
2299}
2300
2301static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2302{
2303 int ret = 0;
2304
2305 /* Never disable regulator marked as always_on */
2306 if (vreg->is_enabled && !vreg->is_always_on) {
2307 ret = regulator_disable(vreg->reg);
2308 if (ret) {
2309 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2310 __func__, vreg->name, ret);
2311 goto out;
2312 }
2313 vreg->is_enabled = false;
2314
2315 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2316 if (ret < 0)
2317 goto out;
2318
2319 /* Set min. voltage level to 0 */
2320 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2321 if (ret)
2322 goto out;
2323 } else if (vreg->is_enabled && vreg->is_always_on) {
2324 if (vreg->lpm_sup) {
2325 /* Put always_on regulator in LPM (low power mode) */
2326 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2327 vreg->lpm_uA);
2328 if (ret < 0)
2329 goto out;
2330 }
2331 }
2332out:
2333 return ret;
2334}
2335
2336static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2337 bool enable, bool is_init)
2338{
2339 int ret = 0, i;
2340 struct sdhci_msm_slot_reg_data *curr_slot;
2341 struct sdhci_msm_reg_data *vreg_table[2];
2342
2343 curr_slot = pdata->vreg_data;
2344 if (!curr_slot) {
 2345		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2346 __func__);
2347 goto out;
2348 }
2349
2350 vreg_table[0] = curr_slot->vdd_data;
2351 vreg_table[1] = curr_slot->vdd_io_data;
2352
2353 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2354 if (vreg_table[i]) {
2355 if (enable)
2356 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2357 else
2358 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2359 if (ret)
2360 goto out;
2361 }
2362 }
2363out:
2364 return ret;
2365}
2366
Asutosh Das0ef24812012-12-18 16:14:02 +05302367/* This init function should be called only once for each SDHC slot */
2368static int sdhci_msm_vreg_init(struct device *dev,
2369 struct sdhci_msm_pltfm_data *pdata,
2370 bool is_init)
2371{
2372 int ret = 0;
2373 struct sdhci_msm_slot_reg_data *curr_slot;
2374 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2375
2376 curr_slot = pdata->vreg_data;
2377 if (!curr_slot)
2378 goto out;
2379
2380 curr_vdd_reg = curr_slot->vdd_data;
2381 curr_vdd_io_reg = curr_slot->vdd_io_data;
2382
2383 if (!is_init)
2384 /* Deregister all regulators from regulator framework */
2385 goto vdd_io_reg_deinit;
2386
2387 /*
2388 * Get the regulator handle from voltage regulator framework
2389 * and then try to set the voltage level for the regulator
2390 */
2391 if (curr_vdd_reg) {
2392 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2393 if (ret)
2394 goto out;
2395 }
2396 if (curr_vdd_io_reg) {
2397 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2398 if (ret)
2399 goto vdd_reg_deinit;
2400 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302401
Asutosh Das0ef24812012-12-18 16:14:02 +05302402 if (ret)
2403 dev_err(dev, "vreg reset failed (%d)\n", ret);
2404 goto out;
2405
2406vdd_io_reg_deinit:
2407 if (curr_vdd_io_reg)
2408 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2409vdd_reg_deinit:
2410 if (curr_vdd_reg)
2411 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2412out:
2413 return ret;
2414}
2415
2416
2417static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2418 enum vdd_io_level level,
2419 unsigned int voltage_level)
2420{
2421 int ret = 0;
2422 int set_level;
2423 struct sdhci_msm_reg_data *vdd_io_reg;
2424
2425 if (!pdata->vreg_data)
2426 return ret;
2427
2428 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2429 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2430 switch (level) {
2431 case VDD_IO_LOW:
2432 set_level = vdd_io_reg->low_vol_level;
2433 break;
2434 case VDD_IO_HIGH:
2435 set_level = vdd_io_reg->high_vol_level;
2436 break;
2437 case VDD_IO_SET_LEVEL:
2438 set_level = voltage_level;
2439 break;
2440 default:
2441 pr_err("%s: invalid argument level = %d",
2442 __func__, level);
2443 ret = -EINVAL;
2444 return ret;
2445 }
2446 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2447 set_level);
2448 }
2449 return ret;
2450}
2451
Ritesh Harjani42876f42015-11-17 17:46:51 +05302452/*
2453 * Acquire spin-lock host->lock before calling this function
2454 */
2455static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2456 bool enable)
2457{
2458 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2459 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2460
2461 if (enable && !msm_host->is_sdiowakeup_enabled)
2462 enable_irq(msm_host->pdata->sdiowakeup_irq);
2463 else if (!enable && msm_host->is_sdiowakeup_enabled)
2464 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2465 else
2466 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2467 __func__, enable, msm_host->is_sdiowakeup_enabled);
2468 msm_host->is_sdiowakeup_enabled = enable;
2469}
2470
2471static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2472{
2473 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302474 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2475 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2476
Ritesh Harjani42876f42015-11-17 17:46:51 +05302477 unsigned long flags;
2478
2479 pr_debug("%s: irq (%d) received\n", __func__, irq);
2480
2481 spin_lock_irqsave(&host->lock, flags);
2482 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2483 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302484 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302485
2486 return IRQ_HANDLED;
2487}
2488
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302489void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2490{
2491 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2492 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302493 const struct sdhci_msm_offset *msm_host_offset =
2494 msm_host->offset;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302495
2496 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2497 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302498 sdhci_msm_readl_relaxed(host,
2499 msm_host_offset->CORE_PWRCTL_STATUS),
2500 sdhci_msm_readl_relaxed(host,
2501 msm_host_offset->CORE_PWRCTL_MASK),
2502 sdhci_msm_readl_relaxed(host,
2503 msm_host_offset->CORE_PWRCTL_CTL));
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302504}
2505
Asutosh Das0ef24812012-12-18 16:14:02 +05302506static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2507{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002508 struct sdhci_host *host = (struct sdhci_host *)data;
2509 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2510 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302511 const struct sdhci_msm_offset *msm_host_offset =
2512 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302513 u8 irq_status = 0;
2514 u8 irq_ack = 0;
2515 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302516 int pwr_state = 0, io_level = 0;
2517 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302518 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302519
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302520 irq_status = sdhci_msm_readb_relaxed(host,
2521 msm_host_offset->CORE_PWRCTL_STATUS);
2522
Asutosh Das0ef24812012-12-18 16:14:02 +05302523 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2524 mmc_hostname(msm_host->mmc), irq, irq_status);
2525
2526 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302527 sdhci_msm_writeb_relaxed(irq_status, host,
2528 msm_host_offset->CORE_PWRCTL_CLEAR);
2529
Asutosh Das0ef24812012-12-18 16:14:02 +05302530 /*
2531 * SDHC has core_mem and hc_mem device memory and these memory
2532 * addresses do not fall within 1KB region. Hence, any update to
2533 * core_mem address space would require an mb() to ensure this gets
2534 * completed before its next update to registers within hc_mem.
2535 */
2536 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302537 /*
2538 * There is a rare HW scenario where the first clear pulse could be
 2539	 * lost when the actual reset and the clear/read of the status register
 2540	 * happen at the same time. Hence, retry for at least 10 times to make
2541 * sure status register is cleared. Otherwise, this will result in
2542 * a spurious power IRQ resulting in system instability.
2543 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302544 while (irq_status & sdhci_msm_readb_relaxed(host,
2545 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302546 if (retry == 0) {
2547 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2548 mmc_hostname(host->mmc), irq_status);
2549 sdhci_msm_dump_pwr_ctrl_regs(host);
2550 BUG_ON(1);
2551 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302552 sdhci_msm_writeb_relaxed(irq_status, host,
2553 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302554 retry--;
2555 udelay(10);
2556 }
2557 if (likely(retry < 10))
2558 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2559 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302560
2561 /* Handle BUS ON/OFF*/
2562 if (irq_status & CORE_PWRCTL_BUS_ON) {
2563 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302564 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302565 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302566 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2567 VDD_IO_HIGH, 0);
2568 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302569 if (ret)
2570 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2571 else
2572 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302573
2574 pwr_state = REQ_BUS_ON;
2575 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302576 }
2577 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302578 if (msm_host->pltfm_init_done)
2579 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2580 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302581 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302582 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302583 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2584 VDD_IO_LOW, 0);
2585 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302586 if (ret)
2587 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2588 else
2589 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302590
2591 pwr_state = REQ_BUS_OFF;
2592 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302593 }
2594 /* Handle IO LOW/HIGH */
2595 if (irq_status & CORE_PWRCTL_IO_LOW) {
2596 /* Switch voltage Low */
2597 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2598 if (ret)
2599 irq_ack |= CORE_PWRCTL_IO_FAIL;
2600 else
2601 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302602
2603 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302604 }
2605 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2606 /* Switch voltage High */
2607 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2608 if (ret)
2609 irq_ack |= CORE_PWRCTL_IO_FAIL;
2610 else
2611 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302612
2613 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302614 }
2615
2616 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302617 sdhci_msm_writeb_relaxed(irq_ack, host,
2618 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302619 /*
2620 * SDHC has core_mem and hc_mem device memory and these memory
2621 * addresses do not fall within 1KB region. Hence, any update to
2622 * core_mem address space would require an mb() to ensure this gets
2623 * completed before its next update to registers within hc_mem.
2624 */
2625 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302626 if ((io_level & REQ_IO_HIGH) &&
2627 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2628 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302629 writel_relaxed((readl_relaxed(host->ioaddr +
2630 msm_host_offset->CORE_VENDOR_SPEC) &
2631 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2632 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002633 else if ((io_level & REQ_IO_LOW) ||
2634 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302635 writel_relaxed((readl_relaxed(host->ioaddr +
2636 msm_host_offset->CORE_VENDOR_SPEC) |
2637 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2638 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002639 mb();
2640
Asutosh Das0ef24812012-12-18 16:14:02 +05302641 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2642 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302643 spin_lock_irqsave(&host->lock, flags);
2644 if (pwr_state)
2645 msm_host->curr_pwr_state = pwr_state;
2646 if (io_level)
2647 msm_host->curr_io_level = io_level;
2648 complete(&msm_host->pwr_irq_completion);
2649 spin_unlock_irqrestore(&host->lock, flags);
2650
Asutosh Das0ef24812012-12-18 16:14:02 +05302651 return IRQ_HANDLED;
2652}
2653
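/*
 * sysfs hooks: the show/store pairs below back the host's device
 * attributes (typically exposed as "polling" and "max_bus_bw" by the
 * probe code elsewhere in this file). Hypothetical usage from a shell,
 * with the device path elided:
 *
 *	echo 1 > /sys/devices/.../polling	(force card-detect polling)
 *	echo 1 > /sys/devices/.../max_bus_bw	(always vote max bus bandwidth)
 */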
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302654static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302655show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2656{
2657 struct sdhci_host *host = dev_get_drvdata(dev);
2658 int poll;
2659 unsigned long flags;
2660
2661 spin_lock_irqsave(&host->lock, flags);
2662 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2663 spin_unlock_irqrestore(&host->lock, flags);
2664
2665 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2666}
2667
2668static ssize_t
2669store_polling(struct device *dev, struct device_attribute *attr,
2670 const char *buf, size_t count)
2671{
2672 struct sdhci_host *host = dev_get_drvdata(dev);
2673 int value;
2674 unsigned long flags;
2675
2676 if (!kstrtou32(buf, 0, &value)) {
2677 spin_lock_irqsave(&host->lock, flags);
2678 if (value) {
2679 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2680 mmc_detect_change(host->mmc, 0);
2681 } else {
2682 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2683 }
2684 spin_unlock_irqrestore(&host->lock, flags);
2685 }
2686 return count;
2687}
2688
2689static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302690show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2691 char *buf)
2692{
2693 struct sdhci_host *host = dev_get_drvdata(dev);
2694 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2695 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2696
2697 return snprintf(buf, PAGE_SIZE, "%u\n",
2698 msm_host->msm_bus_vote.is_max_bw_needed);
2699}
2700
2701static ssize_t
2702store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2703 const char *buf, size_t count)
2704{
2705 struct sdhci_host *host = dev_get_drvdata(dev);
2706 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2707 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2708 uint32_t value;
2709 unsigned long flags;
2710
2711 if (!kstrtou32(buf, 0, &value)) {
2712 spin_lock_irqsave(&host->lock, flags);
2713 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2714 spin_unlock_irqrestore(&host->lock, flags);
2715 }
2716 return count;
2717}
2718
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302719static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302720{
2721 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2722 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302723 const struct sdhci_msm_offset *msm_host_offset =
2724 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302725 unsigned long flags;
2726 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302727 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302728
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302729 spin_lock_irqsave(&host->lock, flags);
2730 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2731 mmc_hostname(host->mmc), __func__, req_type,
2732 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302733 if (!msm_host->mci_removed)
2734 io_sig_sts = sdhci_msm_readl_relaxed(host,
2735 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302736
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302737 /*
2738 * The IRQ for request type IO High/Low will be generated when -
2739 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2740 * 2. If 1 is true and when there is a state change in 1.8V enable
2741 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2742 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2743 * layer tries to set it to 3.3V before card detection happens, the
2744 * IRQ doesn't get triggered as there is no state change in this bit.
2745 * The driver already handles this case by changing the IO voltage
2746 * level to high as part of controller power up sequence. Hence, check
2747 * for host->pwr to handle a case where IO voltage high request is
2748 * issued even before controller power up.
2749 */
2750 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2751 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2752 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2753 pr_debug("%s: do not wait for power IRQ that never comes\n",
2754 mmc_hostname(host->mmc));
2755 spin_unlock_irqrestore(&host->lock, flags);
2756 return;
2757 }
2758 }
2759
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302760 if ((req_type & msm_host->curr_pwr_state) ||
2761 (req_type & msm_host->curr_io_level))
2762 done = true;
2763 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302764
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302765 /*
 2766	 * This is needed here to handle a case where IRQ gets
2767 * triggered even before this function is called so that
2768 * x->done counter of completion gets reset. Otherwise,
2769 * next call to wait_for_completion returns immediately
2770 * without actually waiting for the IRQ to be handled.
2771 */
2772 if (done)
2773 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302774 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
2775 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
2776 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2777 mmc_hostname(host->mmc), req_type);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302778
2779 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2780 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302781}
2782
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002783static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2784{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302785 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2786 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2787 const struct sdhci_msm_offset *msm_host_offset =
2788 msm_host->offset;
2789 u32 config = readl_relaxed(host->ioaddr +
2790 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302791
2792 if (enable) {
2793 config |= CORE_CDR_EN;
2794 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302795 writel_relaxed(config, host->ioaddr +
2796 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302797 } else {
2798 config &= ~CORE_CDR_EN;
2799 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302800 writel_relaxed(config, host->ioaddr +
2801 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302802 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002803}
2804
Asutosh Das648f9d12013-01-10 21:11:04 +05302805static unsigned int sdhci_msm_max_segs(void)
2806{
2807 return SDHCI_MSM_MAX_SEGMENTS;
2808}
2809
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302810static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302811{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302812 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2813 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302814
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302815 return msm_host->pdata->sup_clk_table[0];
2816}
2817
2818static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2819{
2820 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2821 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2822 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2823
2824 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2825}
2826
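/*
 * Return the highest supported clock rate that does not exceed req_clk;
 * requests below the minimum supported rate are rounded up to that
 * minimum. Worked example with an illustrative table {400000, 25000000,
 * 50000000, 100000000, 200000000}: a request for 52000000 Hz returns
 * 50000000 Hz.
 */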
2827static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2828 u32 req_clk)
2829{
2830 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2831 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2832 unsigned int sel_clk = -1;
2833 unsigned char cnt;
2834
2835 if (req_clk < sdhci_msm_get_min_clock(host)) {
2836 sel_clk = sdhci_msm_get_min_clock(host);
2837 return sel_clk;
2838 }
2839
2840 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2841 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2842 break;
2843 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2844 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2845 break;
2846 } else {
2847 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2848 }
2849 }
2850 return sel_clk;
2851}
2852
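/*
 * When "qcom,restore-after-cx-collapse" is set in DT, the vendor-specific
 * and standard SDHCI registers below are cached by
 * sdhci_msm_registers_save() so that they can be written back by
 * sdhci_msm_registers_restore() once the controller clocks are enabled
 * again (see sdhci_msm_enable_controller_clock()), i.e. after the power
 * domain the registers live in may have collapsed.
 */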
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002853static void sdhci_msm_registers_save(struct sdhci_host *host)
2854{
2855 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2856 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2857 const struct sdhci_msm_offset *msm_host_offset =
2858 msm_host->offset;
2859
2860 if (!msm_host->regs_restore.is_supported)
2861 return;
2862
2863 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
2864 msm_host_offset->CORE_VENDOR_SPEC);
2865 msm_host->regs_restore.vendor_pwrctl_mask =
2866 readl_relaxed(host->ioaddr +
2867 msm_host_offset->CORE_PWRCTL_MASK);
2868 msm_host->regs_restore.vendor_func2 =
2869 readl_relaxed(host->ioaddr +
2870 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
2871 msm_host->regs_restore.vendor_func3 =
2872 readl_relaxed(host->ioaddr +
2873 msm_host_offset->CORE_VENDOR_SPEC3);
2874 msm_host->regs_restore.hc_2c_2e =
2875 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
2876 msm_host->regs_restore.hc_3c_3e =
2877 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
2878 msm_host->regs_restore.vendor_pwrctl_ctl =
2879 readl_relaxed(host->ioaddr +
2880 msm_host_offset->CORE_PWRCTL_CTL);
2881 msm_host->regs_restore.hc_38_3a =
2882 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
2883 msm_host->regs_restore.hc_34_36 =
2884 sdhci_readl(host, SDHCI_INT_ENABLE);
2885 msm_host->regs_restore.hc_28_2a =
2886 sdhci_readl(host, SDHCI_HOST_CONTROL);
2887 msm_host->regs_restore.vendor_caps_0 =
2888 readl_relaxed(host->ioaddr +
2889 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
2890 msm_host->regs_restore.hc_caps_1 =
2891 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2892 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
2893 msm_host_offset->CORE_TESTBUS_CONFIG);
2894 msm_host->regs_restore.is_valid = true;
2895
2896 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
2897 mmc_hostname(host->mmc), __func__,
2898 readl_relaxed(host->ioaddr +
2899 msm_host_offset->CORE_PWRCTL_MASK));
2900}
2901
2902static void sdhci_msm_registers_restore(struct sdhci_host *host)
2903{
2904 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2905 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2906 const struct sdhci_msm_offset *msm_host_offset =
2907 msm_host->offset;
2908
2909 if (!msm_host->regs_restore.is_supported ||
2910 !msm_host->regs_restore.is_valid)
2911 return;
2912
2913 writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
2914 msm_host_offset->CORE_VENDOR_SPEC);
2915 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
2916 host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
2917 writel_relaxed(msm_host->regs_restore.vendor_func2,
2918 host->ioaddr +
2919 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
2920 writel_relaxed(msm_host->regs_restore.vendor_func3,
2921 host->ioaddr +
2922 msm_host_offset->CORE_VENDOR_SPEC3);
2923 sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
2924 SDHCI_CLOCK_CONTROL);
2925 sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
2926 SDHCI_AUTO_CMD_ERR);
2927 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
2928 host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
2929 sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
2930 SDHCI_SIGNAL_ENABLE);
2931 sdhci_writel(host, msm_host->regs_restore.hc_34_36,
2932 SDHCI_INT_ENABLE);
2933 sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
2934 SDHCI_HOST_CONTROL);
2935 writel_relaxed(msm_host->regs_restore.vendor_caps_0,
2936 host->ioaddr +
2937 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
2938 sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
2939 SDHCI_CAPABILITIES_1);
2940 writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
2941 msm_host_offset->CORE_TESTBUS_CONFIG);
2942 msm_host->regs_restore.is_valid = false;
2943
2944 pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
2945 mmc_hostname(host->mmc), __func__,
2946 readl_relaxed(host->ioaddr +
2947 msm_host_offset->CORE_PWRCTL_MASK));
2948}
2949
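/*
 * Vote for bus bandwidth and enable the pclk, core clock and ICE clock in
 * that order, then restore any registers saved while the clocks were off.
 * The error path unwinds whatever was already enabled and drops the bus
 * vote.
 */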
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302950static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2951{
2952 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2953 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2954 int rc = 0;
2955
2956 if (atomic_read(&msm_host->controller_clock))
2957 return 0;
2958
2959 sdhci_msm_bus_voting(host, 1);
2960
2961 if (!IS_ERR(msm_host->pclk)) {
2962 rc = clk_prepare_enable(msm_host->pclk);
2963 if (rc) {
2964 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2965 mmc_hostname(host->mmc), __func__, rc);
2966 goto remove_vote;
2967 }
2968 }
2969
2970 rc = clk_prepare_enable(msm_host->clk);
2971 if (rc) {
2972 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2973 mmc_hostname(host->mmc), __func__, rc);
2974 goto disable_pclk;
2975 }
2976
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302977 if (!IS_ERR(msm_host->ice_clk)) {
2978 rc = clk_prepare_enable(msm_host->ice_clk);
2979 if (rc) {
2980 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
2981 mmc_hostname(host->mmc), __func__, rc);
2982 goto disable_host_clk;
2983 }
2984 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302985 atomic_set(&msm_host->controller_clock, 1);
2986 pr_debug("%s: %s: enabled controller clock\n",
2987 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002988 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302989 goto out;
2990
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302991disable_host_clk:
2992 if (!IS_ERR(msm_host->clk))
2993 clk_disable_unprepare(msm_host->clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302994disable_pclk:
2995 if (!IS_ERR(msm_host->pclk))
2996 clk_disable_unprepare(msm_host->pclk);
2997remove_vote:
2998 if (msm_host->msm_bus_vote.client_handle)
2999 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3000out:
3001 return rc;
3002}
3003
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303004static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
3005{
3006 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3007 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303008
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303009 if (atomic_read(&msm_host->controller_clock)) {
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003010 sdhci_msm_registers_save(host);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303011 if (!IS_ERR(msm_host->clk))
3012 clk_disable_unprepare(msm_host->clk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303013 if (!IS_ERR(msm_host->ice_clk))
3014 clk_disable_unprepare(msm_host->ice_clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303015 if (!IS_ERR(msm_host->pclk))
3016 clk_disable_unprepare(msm_host->pclk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303017 sdhci_msm_bus_voting(host, 0);
3018 atomic_set(&msm_host->controller_clock, 0);
3019 pr_debug("%s: %s: disabled controller clock\n",
3020 mmc_hostname(host->mmc), __func__);
3021 }
3022}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303023
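/*
 * Enable or disable the full set of SDHC clocks (controller clocks plus
 * the optional bus, FF and sleep clocks). The disable path is skipped
 * while the card clock is held off for signal-voltage switching, since
 * the SDHC registers still need to be accessible.
 */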
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303024static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
3025{
3026 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3027 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3028 int rc = 0;
3029
3030 if (enable && !atomic_read(&msm_host->clks_on)) {
3031 pr_debug("%s: request to enable clocks\n",
3032 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303033
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303034 /*
3035 * The bus-width or the clock rate might have changed
3036	 * after the controller clocks are enabled; update the bus vote
3037	 * in that case.
3038 */
3039 if (atomic_read(&msm_host->controller_clock))
3040 sdhci_msm_bus_voting(host, 1);
3041
3042 rc = sdhci_msm_enable_controller_clock(host);
3043 if (rc)
3044 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303045
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303046 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3047 rc = clk_prepare_enable(msm_host->bus_clk);
3048 if (rc) {
3049 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
3050 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303051 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303052 }
3053 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003054 if (!IS_ERR(msm_host->ff_clk)) {
3055 rc = clk_prepare_enable(msm_host->ff_clk);
3056 if (rc) {
3057 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
3058 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303059 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003060 }
3061 }
3062 if (!IS_ERR(msm_host->sleep_clk)) {
3063 rc = clk_prepare_enable(msm_host->sleep_clk);
3064 if (rc) {
3065 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
3066 mmc_hostname(host->mmc), __func__, rc);
3067 goto disable_ff_clk;
3068 }
3069 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303070 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303071
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303072 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303073 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
3074 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05303075 /*
3076 * During 1.8V signal switching the clock source must
3077 * still be ON as it requires accessing SDHC
3078	 * registers (SDHCI Host Control2 register bit 3 must
3079 * be written and polled after stopping the SDCLK).
3080 */
3081 if (host->mmc->card_clock_off)
3082 return 0;
3083 pr_debug("%s: request to disable clocks\n",
3084 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003085 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
3086 clk_disable_unprepare(msm_host->sleep_clk);
3087 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3088 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303089 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3090 clk_disable_unprepare(msm_host->bus_clk);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003091 sdhci_msm_disable_controller_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303092 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303093 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303094 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003095disable_ff_clk:
3096 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3097 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303098disable_bus_clk:
3099 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3100 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303101disable_controller_clk:
3102 if (!IS_ERR_OR_NULL(msm_host->clk))
3103 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303104 if (!IS_ERR(msm_host->ice_clk))
3105 clk_disable_unprepare(msm_host->ice_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303106 if (!IS_ERR_OR_NULL(msm_host->pclk))
3107 clk_disable_unprepare(msm_host->pclk);
3108 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303109remove_vote:
3110 if (msm_host->msm_bus_vote.client_handle)
3111 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303112out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303113 return rc;
3114}
3115
3116static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3117{
3118 int rc;
3119 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3120 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303121 const struct sdhci_msm_offset *msm_host_offset =
3122 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003123 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303124 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003125 u32 sup_clock, ddr_clock, dll_lock;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303126 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303127
3128 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303129 /*
3130 * disable pwrsave to ensure clock is not auto-gated until
3131 * the rate is >400KHz (initialization complete).
3132 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303133 writel_relaxed(readl_relaxed(host->ioaddr +
3134 msm_host_offset->CORE_VENDOR_SPEC) &
3135 ~CORE_CLK_PWRSAVE, host->ioaddr +
3136 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303137 sdhci_msm_prepare_clocks(host, false);
3138 host->clock = clock;
3139 goto out;
3140 }
3141
3142 rc = sdhci_msm_prepare_clocks(host, true);
3143 if (rc)
3144 goto out;
3145
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303146 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3147 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303148 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003149 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303150 writel_relaxed(readl_relaxed(host->ioaddr +
3151 msm_host_offset->CORE_VENDOR_SPEC)
3152 | CORE_CLK_PWRSAVE, host->ioaddr +
3153 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303154 /*
3155	 * Disable pwrsave for a newly added card if it does not allow
3156	 * clock gating.
3157 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003158 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303159 writel_relaxed(readl_relaxed(host->ioaddr +
3160 msm_host_offset->CORE_VENDOR_SPEC)
3161 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3162 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303163
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303164 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003165 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003166 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003167 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303168 /*
3169 * The SDHC requires internal clock frequency to be double the
3170 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003171	 * uses the faster clock (100/400 MHz) for some of its parts and
3172	 * sends the actual required clock (50/200 MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303173 */
3174 ddr_clock = clock * 2;
3175 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3176 ddr_clock);
3177 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003178
3179 /*
3180 * In general all timing modes are controlled via UHS mode select in
3181	 * Host Control2 register. The eMMC-specific HS200/HS400 modes have
3182	 * no entries defined here, hence we use these values.
3183 *
3184 * HS200 - SDR104 (Since they both are equivalent in functionality)
3185 * HS400 - This involves multiple configurations
3186 * Initially SDR104 - when tuning is required as HS200
3187 * Then when switching to DDR @ 400MHz (HS400) we use
3188 * the vendor specific HC_SELECT_IN to control the mode.
3189 *
3190 * In addition to controlling the modes we also need to select the
3191 * correct input clock for DLL depending on the mode.
3192 *
3193 * HS400 - divided clock (free running MCLK/2)
3194 * All other modes - default (free running MCLK)
3195 */
3196 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3197 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303198 writel_relaxed(((readl_relaxed(host->ioaddr +
3199 msm_host_offset->CORE_VENDOR_SPEC)
3200 & ~CORE_HC_MCLK_SEL_MASK)
3201 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3202 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003203 /*
3204 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3205 * register
3206 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303207 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003208 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303209 msm_host->enhanced_strobe)) &&
3210 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003211 /*
3212 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3213 * field in VENDOR_SPEC_FUNC
3214 */
3215 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303216 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003217 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303218 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3219 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003220 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003221 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3222 /*
3223 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3224 * CORE_DLL_STATUS to be set. This should get set
3225 * with in 15 us at 200 MHz.
3226	 * within 15 us at 200 MHz.
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303227 rc = readl_poll_timeout(host->ioaddr +
3228 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003229 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3230 CORE_DDR_DLL_LOCK)), 10, 1000);
3231 if (rc == -ETIMEDOUT)
3232 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3233 mmc_hostname(host->mmc),
3234 dll_lock);
3235 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003236 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003237 if (!msm_host->use_cdclp533)
3238 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3239 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303240 msm_host_offset->CORE_VENDOR_SPEC3)
3241 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3242 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003243
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003244 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303245 writel_relaxed(((readl_relaxed(host->ioaddr +
3246 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003247 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303248 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3249 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003250
3251 /*
3252 * Disable HC_SELECT_IN to be able to use the UHS mode select
3253 * configuration from Host Control2 register for all other
3254 * modes.
3255 *
3256 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3257 * in VENDOR_SPEC_FUNC
3258 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303259 writel_relaxed((readl_relaxed(host->ioaddr +
3260 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003261 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303262 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3263 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003264 }
3265 mb();
3266
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303267 if (sup_clock != msm_host->clk_rate) {
3268 pr_debug("%s: %s: setting clk rate to %u\n",
3269 mmc_hostname(host->mmc), __func__, sup_clock);
3270 rc = clk_set_rate(msm_host->clk, sup_clock);
3271 if (rc) {
3272 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3273 mmc_hostname(host->mmc), __func__,
3274 sup_clock, rc);
3275 goto out;
3276 }
3277 msm_host->clk_rate = sup_clock;
3278 host->clock = clock;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303279 /*
3280 * Update the bus vote in case of frequency change due to
3281 * clock scaling.
3282 */
3283 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303284 }
3285out:
3286 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303287}
3288
Sahitya Tummala14613432013-03-21 11:13:25 +05303289static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3290 unsigned int uhs)
3291{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003292 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3293 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303294 const struct sdhci_msm_offset *msm_host_offset =
3295 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303296 u16 ctrl_2;
3297
3298 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3299 /* Select Bus Speed Mode for host */
3300 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003301 if ((uhs == MMC_TIMING_MMC_HS400) ||
3302 (uhs == MMC_TIMING_MMC_HS200) ||
3303 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303304 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3305 else if (uhs == MMC_TIMING_UHS_SDR12)
3306 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3307 else if (uhs == MMC_TIMING_UHS_SDR25)
3308 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3309 else if (uhs == MMC_TIMING_UHS_SDR50)
3310 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003311 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3312 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303313 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303314 /*
3315	 * When clock frequency is less than 100MHz, the feedback clock must be
3316 * provided and DLL must not be used so that tuning can be skipped. To
3317 * provide feedback clock, the mode selection can be any value less
3318 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3319 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003320 if (host->clock <= CORE_FREQ_100MHZ) {
3321 if ((uhs == MMC_TIMING_MMC_HS400) ||
3322 (uhs == MMC_TIMING_MMC_HS200) ||
3323 (uhs == MMC_TIMING_UHS_SDR104))
3324 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303325
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003326 /*
3327 * Make sure DLL is disabled when not required
3328 *
3329 * Write 1 to DLL_RST bit of DLL_CONFIG register
3330 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303331 writel_relaxed((readl_relaxed(host->ioaddr +
3332 msm_host_offset->CORE_DLL_CONFIG)
3333 | CORE_DLL_RST), host->ioaddr +
3334 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003335
3336 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303337 writel_relaxed((readl_relaxed(host->ioaddr +
3338 msm_host_offset->CORE_DLL_CONFIG)
3339 | CORE_DLL_PDN), host->ioaddr +
3340 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003341 mb();
3342
3343 /*
3344 * The DLL needs to be restored and CDCLP533 recalibrated
3345 * when the clock frequency is set back to 400MHz.
3346 */
3347 msm_host->calibration_done = false;
3348 }
3349
3350 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3351 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303352 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3353
3354}
3355
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003356#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003357#define DRV_NAME "cmdq-host"
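/*
 * Dump the 16-word CMDQ debug RAM along with its wrap-around and overlap
 * registers. On cores older than 4.2.0 the debug RAM registers sit at an
 * additional 0x48 offset.
 */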
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303358static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003359{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303360 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303361 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3362 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303363 const struct sdhci_msm_offset *msm_host_offset =
3364 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303365 struct cmdq_host *cq_host = host->cq_host;
3366
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303367 u32 version = sdhci_msm_readl_relaxed(host,
3368 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003369 u16 minor = version & CORE_VERSION_TARGET_MASK;
3370 /* registers offset changed starting from 4.2.0 */
3371	/* register offsets changed starting from 4.2.0 */
3372
3373 pr_err("---- Debug RAM dump ----\n");
3374 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3375 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3376 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3377
3378 while (i < 16) {
3379 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3380 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3381 i++;
3382 }
3383 pr_err("-------------------------\n");
3384}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303385
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303386static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3387{
3388 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3389 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3390 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3391
3392 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3393 sizeof(struct mmc_host));
3394 if (msm_host->mmc->card)
3395 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3396 sizeof(struct mmc_card));
3397 memcpy(&cached_data->copy_host, host,
3398 sizeof(struct sdhci_host));
3399}
3400
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303401void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3402{
3403 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3404 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303405 const struct sdhci_msm_offset *msm_host_offset =
3406 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303407 int tbsel, tbsel2;
3408 int i, index = 0;
3409 u32 test_bus_val = 0;
3410 u32 debug_reg[MAX_TEST_BUS] = {0};
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303411 u32 sts = 0;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303412
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303413 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303414 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003415 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303416 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003417
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303418 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3419 sdhci_msm_readl_relaxed(host,
3420 msm_host_offset->CORE_MCI_DATA_CNT),
3421 sdhci_msm_readl_relaxed(host,
3422 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303423 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303424 sdhci_msm_readl_relaxed(host,
3425 msm_host_offset->CORE_MCI_DATA_CNT),
3426 sdhci_msm_readl_relaxed(host,
3427 msm_host_offset->CORE_MCI_FIFO_CNT),
3428 sdhci_msm_readl_relaxed(host,
3429 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303430 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303431 readl_relaxed(host->ioaddr +
3432 msm_host_offset->CORE_DLL_CONFIG),
3433 readl_relaxed(host->ioaddr +
3434 msm_host_offset->CORE_DLL_STATUS),
3435 sdhci_msm_readl_relaxed(host,
3436 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303437 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303438 readl_relaxed(host->ioaddr +
3439 msm_host_offset->CORE_VENDOR_SPEC),
3440 readl_relaxed(host->ioaddr +
3441 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3442 readl_relaxed(host->ioaddr +
3443 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303444 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303445 readl_relaxed(host->ioaddr +
3446 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303447
3448 /*
3449 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3450 * of CORE_TESTBUS_CONFIG register.
3451 *
3452 * To select test bus 0 to 7 use tbsel and to select any test bus
3453	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3454 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3455 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3456 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003457 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303458 for (tbsel = 0; tbsel < 8; tbsel++) {
3459 if (index >= MAX_TEST_BUS)
3460 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303461 test_bus_val =
3462 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3463 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3464 sdhci_msm_writel_relaxed(test_bus_val, host,
3465 msm_host_offset->CORE_TESTBUS_CONFIG);
3466 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3467 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303468 }
3469 }
3470 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3471 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3472 i, i + 3, debug_reg[i], debug_reg[i+1],
3473 debug_reg[i+2], debug_reg[i+3]);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303474 if (host->is_crypto_en) {
3475 sdhci_msm_ice_get_status(host, &sts);
3476 pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
Venkat Gopalakrishnan6324ee62015-10-22 17:53:30 -07003477 sdhci_msm_ice_print_regs(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303478 }
3479}
3480
3481static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3482{
3483 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3484 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3485
3486 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303487 if (msm_host->ice.pdev) {
3488 if (msm_host->ice_hci_support)
3489 writel_relaxed(1, host->ioaddr +
3490 HC_VENDOR_SPECIFIC_ICE_CTRL);
3491 else
3492 writel_relaxed(1,
3493 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3494 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303495
3496 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003497}
3498
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303499/*
3500 * sdhci_msm_enhanced_strobe_mask :-
3501 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3502 * SW should write 3 to
3503 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3504 * The default reset value of this register is 2.
3505 */
3506static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3507{
3508 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3509 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303510 const struct sdhci_msm_offset *msm_host_offset =
3511 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303512
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303513 if (!msm_host->enhanced_strobe ||
3514 !mmc_card_strobe(msm_host->mmc->card)) {
3515 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303516 mmc_hostname(host->mmc));
3517 return;
3518 }
3519
3520 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303521 writel_relaxed((readl_relaxed(host->ioaddr +
3522 msm_host_offset->CORE_VENDOR_SPEC3)
3523 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3524 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303525 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303526 writel_relaxed((readl_relaxed(host->ioaddr +
3527 msm_host_offset->CORE_VENDOR_SPEC3)
3528 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3529 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303530 }
3531}
3532
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003533static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3534{
3535 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3536 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303537 const struct sdhci_msm_offset *msm_host_offset =
3538 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003539
3540 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303541 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3542 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003543 } else {
3544 u32 value;
3545
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303546 value = sdhci_msm_readl_relaxed(host,
3547 msm_host_offset->CORE_TESTBUS_CONFIG);
3548 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3549 sdhci_msm_writel_relaxed(value, host,
3550 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003551 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303552}
3553
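/*
 * Request a controller soft reset via HC_SW_RST_REQ and poll for it to
 * complete (up to 10000 iterations of 10us). If it never completes, set
 * HC_SW_RST_WAIT_IDLE_DIS so the reset does not wait for pending AXI
 * transfers; disabling the workaround clears that bit again.
 */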
Pavan Anamula691dd592015-08-25 16:11:20 +05303554void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3555{
3556 u32 vendor_func2;
3557 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303558 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3559 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3560 const struct sdhci_msm_offset *msm_host_offset =
3561 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303562
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303563 vendor_func2 = readl_relaxed(host->ioaddr +
3564 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303565
3566 if (enable) {
3567 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303568 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303569 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303570 while (readl_relaxed(host->ioaddr +
3571 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303572 if (timeout == 0) {
3573 pr_info("%s: Applying wait idle disable workaround\n",
3574 mmc_hostname(host->mmc));
3575 /*
3576 * Apply the reset workaround to not wait for
3577 * pending data transfers on AXI before
3578 * resetting the controller. This could be
3579 * risky if the transfers were stuck on the
3580 * AXI bus.
3581 */
3582 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303583 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303584 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303585 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3586 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303587 host->reset_wa_t = ktime_get();
3588 return;
3589 }
3590 timeout--;
3591 udelay(10);
3592 }
3593 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3594 mmc_hostname(host->mmc));
3595 } else {
3596 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303597 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303598 }
3599}
3600
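/*
 * PM QoS IRQ voting: each request bumps a counter and votes for the
 * power-policy latency; unvotes either apply immediately or are deferred
 * through this delayed work (QOS_REMOVE_DELAY_MS) and are dropped if a
 * new vote arrives before the work runs.
 */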
Gilad Broner44445992015-09-29 16:05:39 +03003601static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3602{
3603 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303604 container_of(work, struct sdhci_msm_pm_qos_irq,
3605 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003606
3607 if (atomic_read(&pm_qos_irq->counter))
3608 return;
3609
3610 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3611 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3612}
3613
3614void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3615{
3616 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3617 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3618 struct sdhci_msm_pm_qos_latency *latency =
3619 &msm_host->pdata->pm_qos_data.irq_latency;
3620 int counter;
3621
3622 if (!msm_host->pm_qos_irq.enabled)
3623 return;
3624
3625 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3626 /* Make sure to update the voting in case power policy has changed */
3627 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3628 && counter > 1)
3629 return;
3630
Asutosh Das36c2e922015-12-01 12:19:58 +05303631 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003632 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3633 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3634 msm_host->pm_qos_irq.latency);
3635}
3636
3637void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3638{
3639 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3640 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3641 int counter;
3642
3643 if (!msm_host->pm_qos_irq.enabled)
3644 return;
3645
Subhash Jadavani4d813902015-10-15 12:16:43 -07003646 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3647 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3648 } else {
3649 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3650 return;
Gilad Broner44445992015-09-29 16:05:39 +03003651 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003652
Gilad Broner44445992015-09-29 16:05:39 +03003653 if (counter)
3654 return;
3655
3656 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303657 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3658 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003659 return;
3660 }
3661
3662 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3663 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3664 msm_host->pm_qos_irq.latency);
3665}
3666
Gilad Broner68c54562015-09-20 11:59:46 +03003667static ssize_t
3668sdhci_msm_pm_qos_irq_show(struct device *dev,
3669 struct device_attribute *attr, char *buf)
3670{
3671 struct sdhci_host *host = dev_get_drvdata(dev);
3672 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3673 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3674 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3675
3676 return snprintf(buf, PAGE_SIZE,
3677 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3678 irq->enabled, atomic_read(&irq->counter), irq->latency);
3679}
3680
3681static ssize_t
3682sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3683 struct device_attribute *attr, char *buf)
3684{
3685 struct sdhci_host *host = dev_get_drvdata(dev);
3686 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3687 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3688
3689 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3690}
3691
3692static ssize_t
3693sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3694 struct device_attribute *attr, const char *buf, size_t count)
3695{
3696 struct sdhci_host *host = dev_get_drvdata(dev);
3697 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3698 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3699 uint32_t value;
3700 bool enable;
3701 int ret;
3702
3703 ret = kstrtou32(buf, 0, &value);
3704 if (ret)
3705 goto out;
3706 enable = !!value;
3707
3708 if (enable == msm_host->pm_qos_irq.enabled)
3709 goto out;
3710
3711 msm_host->pm_qos_irq.enabled = enable;
3712 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303713 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003714 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3715 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3716 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3717 msm_host->pm_qos_irq.latency);
3718 }
3719
3720out:
3721 return count;
3722}
3723
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003724#ifdef CONFIG_SMP
3725static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3726 struct sdhci_host *host)
3727{
3728 msm_host->pm_qos_irq.req.irq = host->irq;
3729}
3730#else
3731static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3732 struct sdhci_host *host) { }
3733#endif
3734
Gilad Broner44445992015-09-29 16:05:39 +03003735void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3736{
3737 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3738 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3739 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003740 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003741
3742 if (!msm_host->pdata->pm_qos_data.irq_valid)
3743 return;
3744
3745 /* Initialize only once as this gets called per partition */
3746 if (msm_host->pm_qos_irq.enabled)
3747 return;
3748
3749 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3750 msm_host->pm_qos_irq.req.type =
3751 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003752 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3753 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3754 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003755 else
3756 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3757 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3758
Asutosh Das36c2e922015-12-01 12:19:58 +05303759 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003760 sdhci_msm_pm_qos_irq_unvote_work);
3761 /* For initialization phase, set the performance latency */
3762 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3763 msm_host->pm_qos_irq.latency =
3764 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3765 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3766 msm_host->pm_qos_irq.latency);
3767 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003768
3769 /* sysfs */
3770 msm_host->pm_qos_irq.enable_attr.show =
3771 sdhci_msm_pm_qos_irq_enable_show;
3772 msm_host->pm_qos_irq.enable_attr.store =
3773 sdhci_msm_pm_qos_irq_enable_store;
3774 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3775 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3776 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3777 ret = device_create_file(&msm_host->pdev->dev,
3778 &msm_host->pm_qos_irq.enable_attr);
3779 if (ret)
3780 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3781 __func__, ret);
3782
3783 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3784 msm_host->pm_qos_irq.status_attr.store = NULL;
3785 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3786 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3787 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3788 ret = device_create_file(&msm_host->pdev->dev,
3789 &msm_host->pm_qos_irq.status_attr);
3790 if (ret)
3791 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3792 __func__, ret);
3793}
3794
3795static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3796 struct device_attribute *attr, char *buf)
3797{
3798 struct sdhci_host *host = dev_get_drvdata(dev);
3799 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3800 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3801 struct sdhci_msm_pm_qos_group *group;
3802 int i;
3803 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3804 int offset = 0;
3805
3806 for (i = 0; i < nr_groups; i++) {
3807 group = &msm_host->pm_qos[i];
3808 offset += snprintf(&buf[offset], PAGE_SIZE,
3809 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3810 i, group->req.cpus_affine.bits[0],
3811 msm_host->pm_qos_group_enable,
3812 atomic_read(&group->counter),
3813 group->latency);
3814 }
3815
3816 return offset;
3817}
3818
3819static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3820 struct device_attribute *attr, char *buf)
3821{
3822 struct sdhci_host *host = dev_get_drvdata(dev);
3823 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3824 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3825
3826 return snprintf(buf, PAGE_SIZE, "%s\n",
3827 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3828}
3829
3830static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3831 struct device_attribute *attr, const char *buf, size_t count)
3832{
3833 struct sdhci_host *host = dev_get_drvdata(dev);
3834 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3835 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3836 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3837 uint32_t value;
3838 bool enable;
3839 int ret;
3840 int i;
3841
3842 ret = kstrtou32(buf, 0, &value);
3843 if (ret)
3844 goto out;
3845 enable = !!value;
3846
3847 if (enable == msm_host->pm_qos_group_enable)
3848 goto out;
3849
3850 msm_host->pm_qos_group_enable = enable;
3851 if (!enable) {
3852 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303853 cancel_delayed_work_sync(
3854 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003855 atomic_set(&msm_host->pm_qos[i].counter, 0);
3856 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3857 pm_qos_update_request(&msm_host->pm_qos[i].req,
3858 msm_host->pm_qos[i].latency);
3859 }
3860 }
3861
3862out:
3863 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003864}
3865
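/*
 * Map a CPU number to its PM QoS group index using the cpu_group_map
 * parsed from platform data; returns -EINVAL if the CPU is not covered
 * by any group.
 */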
3866static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3867{
3868 int i;
3869 struct sdhci_msm_cpu_group_map *map =
3870 &msm_host->pdata->pm_qos_data.cpu_group_map;
3871
3872 if (cpu < 0)
3873 goto not_found;
3874
3875 for (i = 0; i < map->nr_groups; i++)
3876 if (cpumask_test_cpu(cpu, &map->mask[i]))
3877 return i;
3878
3879not_found:
3880 return -EINVAL;
3881}
3882
3883void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3884 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3885{
3886 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3887 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3888 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3889 struct sdhci_msm_pm_qos_group *pm_qos_group;
3890 int counter;
3891
3892 if (!msm_host->pm_qos_group_enable || group < 0)
3893 return;
3894
3895 pm_qos_group = &msm_host->pm_qos[group];
3896 counter = atomic_inc_return(&pm_qos_group->counter);
3897
3898 /* Make sure to update the voting in case power policy has changed */
3899 if (pm_qos_group->latency == latency->latency[host->power_policy]
3900 && counter > 1)
3901 return;
3902
Asutosh Das36c2e922015-12-01 12:19:58 +05303903 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003904
3905 pm_qos_group->latency = latency->latency[host->power_policy];
3906 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3907}
3908
3909static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3910{
3911 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303912 container_of(work, struct sdhci_msm_pm_qos_group,
3913 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003914
3915 if (atomic_read(&group->counter))
3916 return;
3917
3918 group->latency = PM_QOS_DEFAULT_VALUE;
3919 pm_qos_update_request(&group->req, group->latency);
3920}
3921
Gilad Broner07d92eb2015-09-29 16:57:21 +03003922bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003923{
3924 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3925 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3926 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3927
3928 if (!msm_host->pm_qos_group_enable || group < 0 ||
3929 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003930 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003931
3932 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303933 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3934 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003935 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003936 }
3937
3938 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3939 pm_qos_update_request(&msm_host->pm_qos[group].req,
3940 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003941 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003942}
3943
3944void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3945 struct sdhci_msm_pm_qos_latency *latency)
3946{
3947 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3948 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3949 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3950 struct sdhci_msm_pm_qos_group *group;
3951 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003952 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003953
3954 if (msm_host->pm_qos_group_enable)
3955 return;
3956
3957 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3958 GFP_KERNEL);
3959 if (!msm_host->pm_qos)
3960 return;
3961
3962 for (i = 0; i < nr_groups; i++) {
3963 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303964 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003965 sdhci_msm_pm_qos_cpu_unvote_work);
3966 atomic_set(&group->counter, 0);
3967 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3968 cpumask_copy(&group->req.cpus_affine,
3969 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
Ritesh Harjanib41e0572017-03-28 13:19:26 +05303970 /* We set default latency here for all pm_qos cpu groups. */
3971 group->latency = PM_QOS_DEFAULT_VALUE;
Gilad Broner44445992015-09-29 16:05:39 +03003972 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3973 group->latency);
3974 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3975 __func__, i,
3976 group->req.cpus_affine.bits[0],
3977 group->latency,
3978 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3979 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003980 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003981 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003982
3983 /* sysfs */
3984 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3985 msm_host->pm_qos_group_status_attr.store = NULL;
3986 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3987 msm_host->pm_qos_group_status_attr.attr.name =
3988 "pm_qos_cpu_groups_status";
3989 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3990 ret = device_create_file(&msm_host->pdev->dev,
3991 &msm_host->pm_qos_group_status_attr);
3992 if (ret)
3993 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3994 __func__, ret);
3995 msm_host->pm_qos_group_enable_attr.show =
3996 sdhci_msm_pm_qos_group_enable_show;
3997 msm_host->pm_qos_group_enable_attr.store =
3998 sdhci_msm_pm_qos_group_enable_store;
3999 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
4000 msm_host->pm_qos_group_enable_attr.attr.name =
4001 "pm_qos_cpu_groups_enable";
4002 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
4003 ret = device_create_file(&msm_host->pdev->dev,
4004 &msm_host->pm_qos_group_enable_attr);
4005 if (ret)
4006 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
4007 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03004008}
4009
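/*
 * Per-request PM QoS hooks: pre_req votes for the IRQ latency and for the
 * CPU group of the CPU issuing the request (unvoting the previous group
 * if the request migrated), while post_req drops both votes synchronously.
 */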
Gilad Broner07d92eb2015-09-29 16:57:21 +03004010static void sdhci_msm_pre_req(struct sdhci_host *host,
4011 struct mmc_request *mmc_req)
4012{
4013 int cpu;
4014 int group;
4015 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4016 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4017 int prev_group = sdhci_msm_get_cpu_group(msm_host,
4018 msm_host->pm_qos_prev_cpu);
4019
4020 sdhci_msm_pm_qos_irq_vote(host);
4021
4022 cpu = get_cpu();
4023 put_cpu();
4024 group = sdhci_msm_get_cpu_group(msm_host, cpu);
4025 if (group < 0)
4026 return;
4027
4028 if (group != prev_group && prev_group >= 0) {
4029 sdhci_msm_pm_qos_cpu_unvote(host,
4030 msm_host->pm_qos_prev_cpu, false);
4031 prev_group = -1; /* make sure to vote for new group */
4032 }
4033
4034 if (prev_group < 0) {
4035 sdhci_msm_pm_qos_cpu_vote(host,
4036 msm_host->pdata->pm_qos_data.latency, cpu);
4037 msm_host->pm_qos_prev_cpu = cpu;
4038 }
4039}
4040
4041static void sdhci_msm_post_req(struct sdhci_host *host,
4042 struct mmc_request *mmc_req)
4043{
4044 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4045 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4046
4047 sdhci_msm_pm_qos_irq_unvote(host, false);
4048
4049 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4050 msm_host->pm_qos_prev_cpu = -1;
4051}
4052
4053static void sdhci_msm_init(struct sdhci_host *host)
4054{
4055 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4056 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4057
4058 sdhci_msm_pm_qos_irq_init(host);
4059
4060 if (msm_host->pdata->pm_qos_data.legacy_valid)
4061 sdhci_msm_pm_qos_cpu_init(host,
4062 msm_host->pdata->pm_qos_data.latency);
4063}
4064
Sahitya Tummala9150a942014-10-31 15:33:04 +05304065static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4066{
4067 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4068 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4069 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4070 u32 max_curr = 0;
4071
4072 if (curr_slot && curr_slot->vdd_data)
4073 max_curr = curr_slot->vdd_data->hpm_uA;
4074
4075 return max_curr;
4076}
4077
Asutosh Das0ef24812012-12-18 16:14:02 +05304078static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304079 .crypto_engine_cfg = sdhci_msm_ice_cfg,
4080 .crypto_engine_reset = sdhci_msm_ice_reset,
Sahitya Tummala14613432013-03-21 11:13:25 +05304081 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05304082 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004083 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05304084 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004085 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05304086 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304087 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304088 .get_min_clock = sdhci_msm_get_min_clock,
4089 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05304090 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304091 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304092 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08004093 .set_bus_width = sdhci_set_bus_width,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304094 .reset = sdhci_msm_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004095 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05304096 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05304097 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03004098 .init = sdhci_msm_init,
4099 .pre_req = sdhci_msm_pre_req,
4100 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05304101 .get_current_limit = sdhci_msm_get_current_limit,
Asutosh Das0ef24812012-12-18 16:14:02 +05304102};
4103
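/*
 * Derive capability overrides and quirks from the SDCC core major/minor
 * version: voltage and bus-width capabilities, the CDCLP533 vs CM DLL
 * choice, updated DLL reset and 14lpp DLL handling, the one-MID reset
 * workaround and the 64-bit address mask.
 */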
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304104static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
4105 struct sdhci_host *host)
4106{
Krishna Konda46fd1432014-10-30 21:13:27 -07004107 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304108 u16 minor;
4109 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304110 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304111 const struct sdhci_msm_offset *msm_host_offset =
4112 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304113
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304114 version = sdhci_msm_readl_relaxed(host,
4115 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304116 major = (version & CORE_VERSION_MAJOR_MASK) >>
4117 CORE_VERSION_MAJOR_SHIFT;
4118 minor = version & CORE_VERSION_TARGET_MASK;
4119
Krishna Konda46fd1432014-10-30 21:13:27 -07004120 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
4121
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304122 /*
4123	 * Starting with the SDCC 5 controller (core major version = 1),
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004124	 * the controller won't advertise 3.0V, 1.8V and 8-bit features
4125 * except for some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304126 */
4127 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004128 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004129 /*
4130 * Enable 1.8V support capability on controllers that
4131 * support dual voltage
4132 */
4133 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07004134 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
4135 caps |= CORE_3_0V_SUPPORT;
4136 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004137 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05304138 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
4139 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304140 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004141
4142 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304143 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
4144	 * on 8992 (minor 0x3e) as part of the reset workaround for the data stuck issue.
4145 */
4146 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304147 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304148 val = readl_relaxed(host->ioaddr +
4149 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304150 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304151 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304152 }
4153 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004154	 * SDCC 5 controllers with major version 1, minor version 0x34 and later,
4155	 * with HS400 mode support, use the CM DLL instead of the CDC LP 533 DLL.
4156 */
4157 if ((major == 1) && (minor < 0x34))
4158 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004159
4160 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004161 * SDCC 5 controller with major version 1, minor version 0x42 and later
4162 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304163 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004164 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304165 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004166 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304167 msm_host->enhanced_strobe = true;
4168 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004169
4170 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004171 * SDCC 5 controller with major version 1 and minor version 0x42,
4172	 * 0x46 or 0x49 currently uses the 14lpp tech DLL whose internal
4173	 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304174	 * when MCLK is gated OFF, it is not gated for less than 0.5us
4175	 * and MCLK must be switched on for at least 1us before DATA
4176 * starts coming.
4177 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004178 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
4179 (minor == 0x49)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304180 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004181
Pavan Anamula5a256df2015-10-16 14:38:28 +05304182	/* Fake 3.0V support for SDIO devices which require such voltage */
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05304183 if (msm_host->core_3_0v_support) {
Pavan Anamula5a256df2015-10-16 14:38:28 +05304184 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304185 writel_relaxed((readl_relaxed(host->ioaddr +
4186 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4187 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304188 }
4189
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004190 if ((major == 1) && (minor >= 0x49))
4191 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304192 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004193	 * Mask 64-bit support for controllers with a 32-bit address bus so that
4194	 * a smaller descriptor size is used, reducing memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004195 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004196 if (!msm_host->pdata->largeaddressbus)
4197 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4198
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304199 writel_relaxed(caps, host->ioaddr +
4200 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004201 /* keep track of the value in SDHCI_CAPABILITIES */
4202 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304203
4204 if ((major == 1) && (minor >= 0x6b))
4205 msm_host->ice_hci_support = true;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304206}
4207
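/*
 * Attach the command-queue (CMDQ) host when CONFIG_MMC_CQ_HCI is enabled.
 * Failure is not fatal: the host simply runs without MMC_CAP2_CMD_QUEUE.
 * Passing "nocmdq" on the command line skips CMDQ setup entirely.
 */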
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004208#ifdef CONFIG_MMC_CQ_HCI
4209static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4210 struct platform_device *pdev)
4211{
4212 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4213 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4214
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304215 if (nocmdq) {
4216 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4217 return;
4218 }
4219
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004220 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004221 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004222 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4223 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004224 host->cq_host = NULL;
4225 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004226 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004227 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004228}
4229#else
4230static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4231 struct platform_device *pdev)
4232{
4233
4234}
4235#endif
4236
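/*
 * Check whether this host is the boot device by matching
 * "androidboot.bootdevice=<dev_name>" against the kernel command line.
 * When the argument is absent we cannot tell, so assume it is.
 */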
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004237static bool sdhci_msm_is_bootdevice(struct device *dev)
4238{
4239 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4240 strlen(saved_command_line))) {
4241 char search_string[50];
4242
4243 snprintf(search_string, ARRAY_SIZE(search_string),
4244 "androidboot.bootdevice=%s", dev_name(dev));
4245 if (strnstr(saved_command_line, search_string,
4246 strlen(saved_command_line)))
4247 return true;
4248 else
4249 return false;
4250 }
4251
4252 /*
4253	 * If the "androidboot.bootdevice=" argument is not present, then
4254	 * return true as we don't know the boot device anyway.
4255 */
4256 return true;
4257}
4258
Asutosh Das0ef24812012-12-18 16:14:02 +05304259static int sdhci_msm_probe(struct platform_device *pdev)
4260{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304261 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304262 struct sdhci_host *host;
4263 struct sdhci_pltfm_host *pltfm_host;
4264 struct sdhci_msm_host *msm_host;
4265 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004266 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004267 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004268 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304269 struct resource *tlmm_memres = NULL;
4270 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304271 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304272
4273 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4274 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4275 GFP_KERNEL);
4276 if (!msm_host) {
4277 ret = -ENOMEM;
4278 goto out;
4279 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304280
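	/*
	 * sdhci-msm-v5 controllers have no separate MCI register space,
	 * so select the register offset table accordingly.
	 */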
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304281 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4282 msm_host->mci_removed = true;
4283 msm_host->offset = &sdhci_msm_offset_mci_removed;
4284 } else {
4285 msm_host->mci_removed = false;
4286 msm_host->offset = &sdhci_msm_offset_mci_present;
4287 }
4288 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304289 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4290 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4291 if (IS_ERR(host)) {
4292 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304293 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304294 }
4295
4296 pltfm_host = sdhci_priv(host);
4297 pltfm_host->priv = msm_host;
4298 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304299 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304300
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304301 /* get the ice device vops if present */
4302 ret = sdhci_msm_ice_get_dev(host);
4303 if (ret == -EPROBE_DEFER) {
4304 /*
4305	 * The SDHCI driver might be probed before the ICE driver is.
4306	 * In that case we would like to return the EPROBE_DEFER code
4307	 * in order to delay its probing.
4308 */
4309 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4310 __func__, ret);
4311 goto out_host_free;
4312
4313 } else if (ret == -ENODEV) {
4314 /*
4315 * ICE device is not enabled in DTS file. No need for further
4316 * initialization of ICE driver.
4317 */
4318 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4319 __func__);
4320 } else if (ret) {
4321 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4322 __func__, ret);
4323 goto out_host_free;
4324 }
4325
Asutosh Das0ef24812012-12-18 16:14:02 +05304326 /* Extract platform data */
4327 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004328 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304329 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004330 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4331 ret);
4332 goto pltfm_free;
4333 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004334
4335	/* skip the probe if eMMC isn't the boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004336 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4337 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004338 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004339 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004340
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004341 if (disable_slots & (1 << (ret - 1))) {
4342 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4343 ret);
4344 ret = -ENODEV;
4345 goto pltfm_free;
4346 }
4347
Sayali Lokhande5f768322016-04-11 18:36:53 +05304348 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004349 sdhci_slot[ret-1] = msm_host;
4350
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004351 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4352 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304353 if (!msm_host->pdata) {
4354 dev_err(&pdev->dev, "DT parsing error\n");
4355 goto pltfm_free;
4356 }
4357 } else {
4358 dev_err(&pdev->dev, "No device tree node\n");
4359 goto pltfm_free;
4360 }
4361
4362 /* Setup Clocks */
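	/*
	 * Clocks are brought up in this order: SDCC bus voter clock,
	 * peripheral bus (iface) clock, optional ICE core clock, core
	 * clock (at the minimum supported rate), then the CDC calibration
	 * fixed-feedback and sleep clocks.
	 */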
4363
4364 /* Setup SDCC bus voter clock. */
4365 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4366 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4367 /* Vote for max. clk rate for max. performance */
4368 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4369 if (ret)
4370 goto pltfm_free;
4371 ret = clk_prepare_enable(msm_host->bus_clk);
4372 if (ret)
4373 goto pltfm_free;
4374 }
4375
4376 /* Setup main peripheral bus clock */
4377 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4378 if (!IS_ERR(msm_host->pclk)) {
4379 ret = clk_prepare_enable(msm_host->pclk);
4380 if (ret)
4381 goto bus_clk_disable;
4382 }
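	/* The iface clock is on now; record that in controller_clock. */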
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304383 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304384
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304385 if (msm_host->ice.pdev) {
4386 /* Setup SDC ICE clock */
4387 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4388 if (!IS_ERR(msm_host->ice_clk)) {
4389 /* ICE core has only one clock frequency for now */
4390 ret = clk_set_rate(msm_host->ice_clk,
4391 msm_host->pdata->sup_ice_clk_table[0]);
4392 if (ret) {
4393 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4394 ret,
4395 msm_host->pdata->sup_ice_clk_table[0]);
4396 goto pclk_disable;
4397 }
4398 ret = clk_prepare_enable(msm_host->ice_clk);
4399 if (ret)
4400 goto pclk_disable;
4401
4402 msm_host->ice_clk_rate =
4403				msm_host->pdata->sup_ice_clk_table[0];
4404 }
4405 }
4406
Asutosh Das0ef24812012-12-18 16:14:02 +05304407 /* Setup SDC MMC clock */
4408 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4409 if (IS_ERR(msm_host->clk)) {
4410 ret = PTR_ERR(msm_host->clk);
4411 goto pclk_disable;
4412 }
4413
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304414 /* Set to the minimum supported clock frequency */
4415 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4416 if (ret) {
4417 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304418 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304419 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304420 ret = clk_prepare_enable(msm_host->clk);
4421 if (ret)
4422 goto pclk_disable;
4423
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304424 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304425 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304426
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004427 /* Setup CDC calibration fixed feedback clock */
4428 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4429 if (!IS_ERR(msm_host->ff_clk)) {
4430 ret = clk_prepare_enable(msm_host->ff_clk);
4431 if (ret)
4432 goto clk_disable;
4433 }
4434
4435 /* Setup CDC calibration sleep clock */
4436 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4437 if (!IS_ERR(msm_host->sleep_clk)) {
4438 ret = clk_prepare_enable(msm_host->sleep_clk);
4439 if (ret)
4440 goto ff_clk_disable;
4441 }
4442
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004443 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4444
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304445 ret = sdhci_msm_bus_register(msm_host, pdev);
4446 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004447 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304448
4449 if (msm_host->msm_bus_vote.client_handle)
4450 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4451 sdhci_msm_bus_work);
4452 sdhci_msm_bus_voting(host, 1);
4453
Asutosh Das0ef24812012-12-18 16:14:02 +05304454 /* Setup regulators */
4455 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4456 if (ret) {
4457 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304458 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304459 }
4460
4461 /* Reset the core and Enable SDHC mode */
4462 core_memres = platform_get_resource_byname(pdev,
4463 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304464 if (!msm_host->mci_removed) {
4465 if (!core_memres) {
4466 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4467 goto vreg_deinit;
4468 }
4469 msm_host->core_mem = devm_ioremap(&pdev->dev,
4470 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304471
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304472 if (!msm_host->core_mem) {
4473 dev_err(&pdev->dev, "Failed to remap registers\n");
4474 ret = -ENOMEM;
4475 goto vreg_deinit;
4476 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304477 }
4478
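	/*
	 * If a "tlmm_mem" resource is provided, set bit 1 in that TLMM
	 * register before bringing the host up.
	 */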
Sahitya Tummala079ed852015-10-29 20:18:45 +05304479 tlmm_memres = platform_get_resource_byname(pdev,
4480 IORESOURCE_MEM, "tlmm_mem");
4481 if (tlmm_memres) {
4482 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4483 resource_size(tlmm_memres));
4484
4485 if (!tlmm_mem) {
4486 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4487 ret = -ENOMEM;
4488 goto vreg_deinit;
4489 }
4490 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4491 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4492 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4493 }
4494
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304495 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004496 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304497 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004498 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304499 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304500
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304501 if (!msm_host->mci_removed) {
4502 /* Set HC_MODE_EN bit in HC_MODE register */
4503 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304504
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304505 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4506 writel_relaxed(readl_relaxed(msm_host->core_mem +
4507 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4508 msm_host->core_mem + CORE_HC_MODE);
4509 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304510 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004511
4512 /*
4513	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4514 * be used as required later on.
4515 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304516 writel_relaxed((readl_relaxed(host->ioaddr +
4517 msm_host_offset->CORE_VENDOR_SPEC) |
4518 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4519 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304520 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304521 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4522 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4523 * interrupt in GIC (by registering the interrupt handler), we need to
4524	 * ensure that any pending power irq interrupt status is acknowledged,
4525	 * otherwise the power irq interrupt handler would fire prematurely.
4526 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304527 irq_status = sdhci_msm_readl_relaxed(host,
4528 msm_host_offset->CORE_PWRCTL_STATUS);
4529 sdhci_msm_writel_relaxed(irq_status, host,
4530 msm_host_offset->CORE_PWRCTL_CLEAR);
4531 irq_ctl = sdhci_msm_readl_relaxed(host,
4532 msm_host_offset->CORE_PWRCTL_CTL);
4533
Subhash Jadavani28137342013-05-14 17:46:43 +05304534 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4535 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4536 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4537 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304538 sdhci_msm_writel_relaxed(irq_ctl, host,
4539 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004540
Subhash Jadavani28137342013-05-14 17:46:43 +05304541 /*
4542	 * Ensure that the above writes are propagated before interrupt
4543	 * enablement in the GIC.
4544 */
4545 mb();
4546
4547 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304548 * Following are the deviations from SDHC spec v3.0 -
4549 * 1. Card detection is handled using separate GPIO.
4550 * 2. Bus power control is handled by interacting with PMIC.
4551 */
4552 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4553 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304554 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004555 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304556 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304557 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304558 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304559 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304560 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304561 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304562
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304563 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4564 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4565
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004566 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004567 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4568 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4569 SDHCI_VENDOR_VER_SHIFT));
4570 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4571 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4572 /*
4573	 * Add a 40us delay in the interrupt handler when
4574	 * operating at the initialization frequency (400KHz).
4575 */
4576 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4577 /*
4578 * Set Software Reset for DAT line in Software
4579 * Reset Register (Bit 2).
4580 */
4581 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4582 }
4583
Asutosh Das214b9662013-06-13 14:27:42 +05304584 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4585
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004586 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004587 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4588 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304589 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004590 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304591 goto vreg_deinit;
4592 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004593 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304594 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004595 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304596 if (ret) {
4597 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004598 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304599 goto vreg_deinit;
4600 }
4601
4602 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304603 sdhci_msm_writel_relaxed(INT_MASK, host,
4604 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304605
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304606#ifdef CONFIG_MMC_CLKGATE
4607 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4608 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4609#endif
4610
Asutosh Das0ef24812012-12-18 16:14:02 +05304611 /* Set host capabilities */
4612 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4613 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004614 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304615 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304616 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004617 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004618 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004619 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304620 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004621 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004622 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304623 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304624
4625 if (msm_host->pdata->nonremovable)
4626 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4627
Guoping Yuf7c91332014-08-20 16:56:18 +08004628 if (msm_host->pdata->nonhotplug)
4629 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4630
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07004631 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
4632
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304633 /* Initialize ICE if present */
4634 if (msm_host->ice.pdev) {
4635 ret = sdhci_msm_ice_init(host);
4636 if (ret) {
4637			dev_err(&pdev->dev, "%s: SDHCI ICE init failed (%d)\n",
4638 mmc_hostname(host->mmc), ret);
4639 ret = -EINVAL;
4640 goto vreg_deinit;
4641 }
4642 host->is_crypto_en = true;
4643 /* Packed commands cannot be encrypted/decrypted using ICE */
4644 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
4645 MMC_CAP2_PACKED_WR_CONTROL);
4646 }
4647
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304648 init_completion(&msm_host->pwr_irq_completion);
4649
Sahitya Tummala581df132013-03-12 14:57:46 +05304650 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304651 /*
4652 * Set up the card detect GPIO in active configuration before
4653 * configuring it as an IRQ. Otherwise, it can be in some
4654	 * weird/inconsistent state resulting in a flood of interrupts.
4655 */
4656 sdhci_msm_setup_pins(msm_host->pdata, true);
4657
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304658 /*
4659 * This delay is needed for stabilizing the card detect GPIO
4660 * line after changing the pull configs.
4661 */
4662 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304663 ret = mmc_gpio_request_cd(msm_host->mmc,
4664 msm_host->pdata->status_gpio, 0);
4665 if (ret) {
4666 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4667 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304668 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304669 }
4670 }
4671
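	/*
	 * Use a 64-bit DMA mask when the controller advertises
	 * SDHCI_CAN_64BIT and the platform supports it; otherwise fall
	 * back to a 32-bit mask.
	 */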
Krishna Konda7feab352013-09-17 23:55:40 -07004672 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4673 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4674 host->dma_mask = DMA_BIT_MASK(64);
4675 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304676 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004677 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304678 host->dma_mask = DMA_BIT_MASK(32);
4679 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304680 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304681 } else {
4682 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4683 }
4684
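	/*
	 * Optional out-of-band SDIO wake-up interrupt (DAT1 GPIO). It is
	 * requested here but left disabled; the suspend/resume paths arm
	 * it via sdhci_msm_cfg_sdio_wakeup().
	 */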
Ritesh Harjani42876f42015-11-17 17:46:51 +05304685 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4686 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304687 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304688 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4689 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304690 msm_host->is_sdiowakeup_enabled = true;
4691 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4692 sdhci_msm_sdiowakeup_irq,
4693 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4694 "sdhci-msm sdiowakeup", host);
4695 if (ret) {
4696 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4697 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4698 msm_host->pdata->sdiowakeup_irq = -1;
4699 msm_host->is_sdiowakeup_enabled = false;
4700 goto vreg_deinit;
4701 } else {
4702 spin_lock_irqsave(&host->lock, flags);
4703 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304704 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304705 spin_unlock_irqrestore(&host->lock, flags);
4706 }
4707 }
4708
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004709 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304710 ret = sdhci_add_host(host);
4711 if (ret) {
4712 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304713 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304714 }
4715
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05304716 msm_host->pltfm_init_done = true;
4717
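	/* Enable runtime PM with autosuspend now that the host is registered. */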
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004718 pm_runtime_set_active(&pdev->dev);
4719 pm_runtime_enable(&pdev->dev);
4720 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4721 pm_runtime_use_autosuspend(&pdev->dev);
4722
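	/* Expose the "max_bus_bw" sysfs attribute for the bus bandwidth vote. */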
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304723 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4724 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4725 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4726 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4727 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4728 ret = device_create_file(&pdev->dev,
4729 &msm_host->msm_bus_vote.max_bus_bw);
4730 if (ret)
4731 goto remove_host;
4732
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304733 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4734 msm_host->polling.show = show_polling;
4735 msm_host->polling.store = store_polling;
4736 sysfs_attr_init(&msm_host->polling.attr);
4737 msm_host->polling.attr.name = "polling";
4738 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4739 ret = device_create_file(&pdev->dev, &msm_host->polling);
4740 if (ret)
4741 goto remove_max_bus_bw_file;
4742 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304743
4744 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4745 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4746 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4747 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4748 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4749 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4750 if (ret) {
4751 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4752 mmc_hostname(host->mmc), __func__, ret);
4753 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4754 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304755 /* Successful initialization */
4756 goto out;
4757
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304758remove_max_bus_bw_file:
4759 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304760remove_host:
4761 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004762 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304763 sdhci_remove_host(host, dead);
4764vreg_deinit:
4765 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304766bus_unregister:
4767 if (msm_host->msm_bus_vote.client_handle)
4768 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4769 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004770sleep_clk_disable:
4771 if (!IS_ERR(msm_host->sleep_clk))
4772 clk_disable_unprepare(msm_host->sleep_clk);
4773ff_clk_disable:
4774 if (!IS_ERR(msm_host->ff_clk))
4775 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304776clk_disable:
4777 if (!IS_ERR(msm_host->clk))
4778 clk_disable_unprepare(msm_host->clk);
4779pclk_disable:
4780 if (!IS_ERR(msm_host->pclk))
4781 clk_disable_unprepare(msm_host->pclk);
4782bus_clk_disable:
4783 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4784 clk_disable_unprepare(msm_host->bus_clk);
4785pltfm_free:
4786 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304787out_host_free:
4788 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304789out:
4790 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4791 return ret;
4792}
4793
4794static int sdhci_msm_remove(struct platform_device *pdev)
4795{
4796 struct sdhci_host *host = platform_get_drvdata(pdev);
4797 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4798 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4799 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4800 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4801 0xffffffff);
4802
4803 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304804 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4805 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304806 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004807 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304808 sdhci_remove_host(host, dead);
4809 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304810
Asutosh Das0ef24812012-12-18 16:14:02 +05304811 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304812
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304813 sdhci_msm_setup_pins(pdata, true);
4814 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304815
4816 if (msm_host->msm_bus_vote.client_handle) {
4817 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4818 sdhci_msm_bus_unregister(msm_host);
4819 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304820 return 0;
4821}
4822
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004823#ifdef CONFIG_PM
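/*
 * Arm or disarm the SDIO DAT1 GPIO wake-up interrupt around suspend.
 * Returns 1 when it does not apply (no SDIO card, no valid wake-up
 * interrupt, or SDIO IRQ wake-up not requested), in which case the
 * callers fall back to disabling/enabling the regular SDHC interrupt
 * across suspend/resume.
 */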
Ritesh Harjani42876f42015-11-17 17:46:51 +05304824static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4825{
4826 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4827 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4828 unsigned long flags;
4829 int ret = 0;
4830
4831 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4832 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4833 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304834 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304835 return 1;
4836 }
4837
4838 spin_lock_irqsave(&host->lock, flags);
4839 if (enable) {
4840 /* configure DAT1 gpio if applicable */
4841 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304842 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304843 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4844 if (!ret)
4845 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4846 goto out;
4847 } else {
4848 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4849 mmc_hostname(host->mmc), enable);
4850 }
4851 } else {
4852 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4853 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4854 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304855 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304856 } else {
4857			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4858 mmc_hostname(host->mmc), enable);
4859
4860 }
4861 }
4862out:
4863 if (ret)
4864 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4865 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4866 ret, msm_host->pdata->sdiowakeup_irq);
4867 spin_unlock_irqrestore(&host->lock, flags);
4868 return ret;
4869}
4870
4871
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004872static int sdhci_msm_runtime_suspend(struct device *dev)
4873{
4874 struct sdhci_host *host = dev_get_drvdata(dev);
4875 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4876 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004877 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304878 int ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004879
Ritesh Harjani42876f42015-11-17 17:46:51 +05304880 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4881 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304882
Ritesh Harjani42876f42015-11-17 17:46:51 +05304883 sdhci_cfg_irq(host, false, true);
4884
4885defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004886 disable_irq(msm_host->pwr_irq);
4887
4888 /*
4889	 * Remove the bus vote immediately only if the clocks are off; in that
4890	 * case work may already be queued to remove the vote, but it might not
4891	 * complete before runtime suspend or system suspend.
4892 */
4893 if (!atomic_read(&msm_host->clks_on)) {
4894 if (msm_host->msm_bus_vote.client_handle)
4895 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4896 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004897 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4898 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004899
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304900 if (host->is_crypto_en) {
4901 ret = sdhci_msm_ice_suspend(host);
4902 if (ret < 0)
4903 pr_err("%s: failed to suspend crypto engine %d\n",
4904 mmc_hostname(host->mmc), ret);
4905 }
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004906 return 0;
4907}
4908
4909static int sdhci_msm_runtime_resume(struct device *dev)
4910{
4911 struct sdhci_host *host = dev_get_drvdata(dev);
4912 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4913 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004914 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304915 int ret;
4916
4917 if (host->is_crypto_en) {
4918 ret = sdhci_msm_enable_controller_clock(host);
4919 if (ret) {
4920 pr_err("%s: Failed to enable reqd clocks\n",
4921 mmc_hostname(host->mmc));
4922 goto skip_ice_resume;
4923 }
4924 ret = sdhci_msm_ice_resume(host);
4925 if (ret)
4926 pr_err("%s: failed to resume crypto engine %d\n",
4927 mmc_hostname(host->mmc), ret);
4928 }
4929skip_ice_resume:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004930
Ritesh Harjani42876f42015-11-17 17:46:51 +05304931 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4932 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304933
Ritesh Harjani42876f42015-11-17 17:46:51 +05304934 sdhci_cfg_irq(host, true, true);
4935
4936defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004937 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004938
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004939 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4940 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004941 return 0;
4942}
4943
4944static int sdhci_msm_suspend(struct device *dev)
4945{
4946 struct sdhci_host *host = dev_get_drvdata(dev);
4947 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4948 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004949 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304950 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004951 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004952
4953 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4954 (msm_host->mmc->slot.cd_irq >= 0))
4955 disable_irq(msm_host->mmc->slot.cd_irq);
4956
4957 if (pm_runtime_suspended(dev)) {
4958 pr_debug("%s: %s: already runtime suspended\n",
4959 mmc_hostname(host->mmc), __func__);
4960 goto out;
4961 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004962 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004963out:
Sayali Lokhandeb30295162016-11-18 16:05:50 +05304964 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304965 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4966 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4967 if (sdio_cfg)
4968 sdhci_cfg_irq(host, false, true);
4969 }
4970
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004971 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4972 ktime_to_us(ktime_sub(ktime_get(), start)));
4973 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004974}
4975
4976static int sdhci_msm_resume(struct device *dev)
4977{
4978 struct sdhci_host *host = dev_get_drvdata(dev);
4979 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4980 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4981 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304982 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004983 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004984
4985 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4986 (msm_host->mmc->slot.cd_irq >= 0))
4987 enable_irq(msm_host->mmc->slot.cd_irq);
4988
4989 if (pm_runtime_suspended(dev)) {
4990 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4991 mmc_hostname(host->mmc), __func__);
4992 goto out;
4993 }
4994
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004995 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004996out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304997 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4998 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4999 if (sdio_cfg)
5000 sdhci_cfg_irq(host, true, true);
5001 }
5002
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005003 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
5004 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005005 return ret;
5006}
5007
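/*
 * Late (noirq) suspend check: fail the suspend with -EAGAIN if the
 * clocks are still on, or with -EBUSY if SDIO wake-up IRQ processing
 * is still pending, so that the suspend can be retried.
 */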
Ritesh Harjani42876f42015-11-17 17:46:51 +05305008static int sdhci_msm_suspend_noirq(struct device *dev)
5009{
5010 struct sdhci_host *host = dev_get_drvdata(dev);
5011 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5012 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5013 int ret = 0;
5014
5015 /*
5016	 * ksdioirqd may still be running, hence abort so that
5017	 * suspend can be retried in case the clocks are still ON
5018 */
5019 if (atomic_read(&msm_host->clks_on)) {
5020 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
5021 mmc_hostname(host->mmc), __func__);
5022 ret = -EAGAIN;
5023 }
5024
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305025 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5026 if (msm_host->sdio_pending_processing)
5027 ret = -EBUSY;
5028
Ritesh Harjani42876f42015-11-17 17:46:51 +05305029 return ret;
5030}
5031
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005032static const struct dev_pm_ops sdhci_msm_pmops = {
5033 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
5034 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
5035 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05305036 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005037};
5038
5039#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
5040
5041#else
5042#define SDHCI_MSM_PMOPS NULL
5043#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05305044static const struct of_device_id sdhci_msm_dt_match[] = {
5045 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305046 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07005047 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05305048};
5049MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
5050
5051static struct platform_driver sdhci_msm_driver = {
5052 .probe = sdhci_msm_probe,
5053 .remove = sdhci_msm_remove,
5054 .driver = {
5055 .name = "sdhci_msm",
5056 .owner = THIS_MODULE,
5057 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005058 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05305059 },
5060};
5061
5062module_platform_driver(sdhci_msm_driver);
5063
5064MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
5065MODULE_LICENSE("GPL v2");