/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <linux/nvmem-consumer.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define SDHCI_VER_100		0x2B

#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)

#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_LOCK		(1 << 7)
#define CORE_DDR_DLL_LOCK	(1 << 11)

#define CORE_CLK_PWRSAVE		(1 << 1)
#define CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN	(1 << 7)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		(1 << 18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
#define HC_SW_RST_REQ		(1 << 21)
#define CORE_ONE_MID_EN		(1 << 25)

#define CORE_8_BIT_SUPPORT		(1 << 18)
#define CORE_3_3V_SUPPORT		(1 << 24)
#define CORE_3_0V_SUPPORT		(1 << 25)
#define CORE_1_8V_SUPPORT		(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_FIFO_ALT_EN		(1 << 10)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

#define CORE_DDR_CAL_EN			(1 << 0)
#define CORE_FLL_CYCLE_CNT		(1 << 18)
#define CORE_DLL_CLOCK_DISABLE		(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040873
#define DLL_USR_CTL_POR_VAL		0x10800
#define ENABLE_DLL_LOCK_STATUS		(1 << 26)
#define FINE_TUNE_MODE_EN		(1 << 27)
#define BIAS_OK_SIGNAL			(1 << 29)
#define DLL_CONFIG_3_POR_VAL		0x10

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS		(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
#define TCXO_FREQ		19200000

#define INVALID_TUNING_PHASE	-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES		16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100

#define RCLK_TOGGLE	0x2

struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DLL_CONFIG_3;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_OLD; /* Applicable to sdcc minor ver < 0x49 only */
	u32 CORE_DLL_USR_CTL; /* Present on SDCC5.1 onwards */
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DLL_CONFIG_3 = 0x258,
	.CORE_DDR_CONFIG = 0x25C,
	.CORE_DLL_USR_CTL = 0x388,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DLL_CONFIG_3 = 0x1B8,
	.CORE_DDR_CONFIG_OLD = 0x1B8, /* Applicable to sdcc minor ver < 0x49 */
	.CORE_DDR_CONFIG = 0x1BC,
};

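/*
 * Register accessors for the vendor "core" registers: on hosts where the
 * MCI register block has been removed (msm_host->mci_removed), these
 * registers live in the main SDHCI register space (host->ioaddr) at the
 * sdhci_msm_offset_mci_removed offsets above; otherwise they sit in the
 * separate core_mem region at the sdhci_msm_offset_mci_present offsets.
 */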
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set to whatever is in voltage_level (the third argument) of
	 * sdhci_msm_set_vdd_io_vol()
	 */
	VDD_IO_SET_LEVEL,
};

enum dll_init_context {
	DLL_INIT_NORMAL = 0,
	DLL_INIT_FROM_CX_COLLAPSE_EXIT,
};

static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
						u32 req_clk);

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

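/*
 * Worked example (illustrative only): if phases 14, 15, 0, 1, 2 and 3
 * all pass, the two windows wrap around phase 15/0 and are merged into
 * the single window {14, 15, 0, 1, 2, 3}; the entry at the 3/4 point of
 * that window (index 3, i.e. phase 1) is the phase that gets programmed.
 */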
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if the next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form a cycle then merge them as a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, then the
			 * total number of phases in both windows should not
			 * be more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 208000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

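/*
 * Rough outline of the DLL initialization below: disable PWRSAVE so the
 * clock stays running, assert DLL_RST and DLL_PDN, program the DLL
 * frequency (normal init context only), release reset/power-down, apply
 * any HSR/user-control overrides, enable DLL and CK_OUT, and finally
 * poll DLL_STATUS for the LOCK bit. PWRSAVE is restored on exit.
 */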
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host,
				enum dll_init_context init_context)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;
		u32 actual_clk = sdhci_msm_get_sup_clk_rate(host, host->clock);

		/*
		 * Only configure the mclk_freq in normal DLL init
		 * context. If the DLL init is coming from
		 * CX Collapse Exit context, the host->clock may be zero.
		 * The DLL_CONFIG_2 register has already been restored to
		 * proper value prior to getting here.
		 */
		if (init_context == DLL_INIT_NORMAL) {
			switch (actual_clk) {
			case 202000000:
			case 201500000:
			case 200000000:
				mclk_freq = 42;
				break;
			case 192000000:
				mclk_freq = 40;
				break;
			default:
				mclk_freq = (u32)((actual_clk / TCXO_FREQ) * 4);
				pr_info_once("%s: %s: Non standard clk freq =%u\n",
					mmc_hostname(mmc), __func__, actual_clk);
			}

			if ((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG_2)
				& CORE_FLL_CYCLE_CNT))
				mclk_freq *= 2;

			writel_relaxed(((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG_2)
				& ~(0xFF << 10)) | (mclk_freq << 10)),
				host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		}
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/*
	 * Configure the DLL user control register to enable DLL status
	 * reporting. This setting is applicable to SDCC v5.1 onwards only.
	 *
	 * Also configure the Tassadar DLL (only applicable to 7FF projects).
	 */
	if (msm_host->need_dll_user_ctl) {
		if (msm_host->dll_hsr) {
			writel_relaxed(msm_host->dll_hsr->dll_usr_ctl,
					host->ioaddr +
					msm_host_offset->CORE_DLL_USR_CTL);
			writel_relaxed(msm_host->dll_hsr->dll_config_3,
					host->ioaddr +
					msm_host_offset->CORE_DLL_CONFIG_3);
		} else {
			writel_relaxed(DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
					ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL,
					host->ioaddr +
					msm_host_offset->CORE_DLL_USR_CTL);

			writel_relaxed(DLL_CONFIG_3_POR_VAL, host->ioaddr +
					msm_host_offset->CORE_DLL_CONFIG_3);
		}
	}

	/*
	 * Update only the lower byte of DLL_CONFIG with the HSR values,
	 * since these are static settings.
	 */
	if (msm_host->dll_hsr) {
		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & (~0xff)) |
			(msm_host->dll_hsr->dll_config & 0xff)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

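/*
 * CDCLP533 calibration, used for HS400 on hosts that set use_cdclp533
 * (see sdhci_msm_hs400_dll_calibration() below); other hosts calibrate
 * CM_DLL_SDC4 instead. The sequence below programs the CSR_CDC_*
 * registers with fixed values, triggers a HW calibration, polls
 * CALIBRATION_DONE and then checks CDC_ERROR_CODE before re-enabling
 * CDC traffic.
 */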
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_CDC_T4_DLY_SEL),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		& ~CORE_CDC_SWITCH_BYPASS_OFF),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		| CORE_CDC_SWITCH_RC_EN),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		& ~CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_HW_AUTOCAL_ENA),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
		host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		| CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

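/*
 * CM_DLL_SDC4 calibration: program CORE_DDR_CONFIG (from HSR data, the
 * platform ddr_config, or the POR default), optionally enable
 * CMDIN_RCLK_EN for enhanced strobe, set DDR_CAL_EN and poll for
 * DDR_DLL_LOCK in CORE_DLL_STATUS. PWRSAVE_DLL is only set for hosts
 * that do not use the 14lpp DLL (see the comment in the function body).
 */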
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->pdata->rclk_wa) {
		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	} else if (msm_host->dll_hsr && msm_host->dll_hsr->ddr_config) {
		writel_relaxed(msm_host->dll_hsr->ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	} else if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	} else {
		writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_OLD);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG_2)
		| CORE_DDR_CAL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
			mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host, DLL_INIT_NORMAL);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host, DLL_INIT_NORMAL);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;
	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

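/*
 * Helper used by the tuning loop: issues a CMD6 (SWITCH) that rewrites
 * the HS_TIMING EXT_CSD byte, keeping HS200 timing (lower nibble = 2)
 * while changing the card's driver strength in the upper nibble.
 */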
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
		mmc_hostname(mmc), __func__,
		drv_type);
}

int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;
	u8 last_good_phase = 0;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * when the clock frequency is greater than 100MHz in these modes.
	 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001232 if (host->clock <= CORE_FREQ_100MHZ ||
1233 !((ios.timing == MMC_TIMING_MMC_HS400) ||
1234 (ios.timing == MMC_TIMING_MMC_HS200) ||
1235 (ios.timing == MMC_TIMING_UHS_SDR104)))
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301236 return 0;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001237
Sahitya Tummala0523cda2015-11-17 09:24:53 +05301238 /*
1239 * Don't allow re-tuning for CRC errors observed for any commands
1240 * that are sent during tuning sequence itself.
1241 */
1242 if (msm_host->tuning_in_progress)
1243 return 0;
1244 msm_host->tuning_in_progress = true;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001245 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001246
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001247 /* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001248 if (msm_host->tuning_done && !msm_host->calibration_done &&
1249 (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001250 rc = sdhci_msm_hs400_dll_calibration(host);
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001251 spin_lock_irqsave(&host->lock, flags);
1252 if (!rc)
1253 msm_host->calibration_done = true;
1254 spin_unlock_irqrestore(&host->lock, flags);
1255 goto out;
1256 }
1257
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001258 spin_lock_irqsave(&host->lock, flags);
1259
1260 if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
1261 (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
1262 tuning_block_pattern = tuning_block_128;
1263 size = sizeof(tuning_block_128);
1264 }
1265 spin_unlock_irqrestore(&host->lock, flags);
1266
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001267 data_buf = kmalloc(size, GFP_KERNEL);
1268 if (!data_buf) {
1269 rc = -ENOMEM;
1270 goto out;
1271 }
1272
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301273retry:
Krishna Konda96e6b112013-10-28 15:25:03 -07001274 tuned_phase_cnt = 0;
1275
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301276 /* first of all reset the tuning block */
Bao D. Nguyen2c34e7b2018-12-05 12:52:35 -08001277 rc = msm_init_cm_dll(host, DLL_INIT_NORMAL);
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301278 if (rc)
1279 goto kfree;
1280
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001281 phase = 0;
1282 do {
1283 struct mmc_command cmd = {0};
1284 struct mmc_data data = {0};
1285 struct mmc_request mrq = {
1286 .cmd = &cmd,
1287 .data = &data
1288 };
1289 struct scatterlist sg;
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301290 struct mmc_command sts_cmd = {0};
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001291
1292 /* set the phase in delay line hw block */
1293 rc = msm_config_cm_dll_phase(host, phase);
1294 if (rc)
1295 goto kfree;
1296
1297 cmd.opcode = opcode;
1298 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1299
1300 data.blksz = size;
1301 data.blocks = 1;
1302 data.flags = MMC_DATA_READ;
1303 data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
1304
1305 data.sg = &sg;
1306 data.sg_len = 1;
1307 sg_init_one(&sg, data_buf, size);
1308 memset(data_buf, 0, size);
1309 mmc_wait_for_req(mmc, &mrq);
1310
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301311 if (card && (cmd.error || data.error)) {
Veerabhadrarao Badiganti174f3a82017-06-15 18:44:19 +05301312 /*
1313 * Set the dll to last known good phase while sending
1314 * status command to ensure that status command won't
1315 * fail due to bad phase.
1316 */
1317 if (tuned_phase_cnt)
1318 last_good_phase =
1319 tuned_phases[tuned_phase_cnt-1];
1320 else if (msm_host->saved_tuning_phase !=
1321 INVALID_TUNING_PHASE)
1322 last_good_phase = msm_host->saved_tuning_phase;
1323
1324 rc = msm_config_cm_dll_phase(host, last_good_phase);
1325 if (rc)
1326 goto kfree;
1327
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301328 sts_cmd.opcode = MMC_SEND_STATUS;
1329 sts_cmd.arg = card->rca << 16;
1330 sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1331 sts_retry = 5;
1332 while (sts_retry) {
1333 mmc_wait_for_cmd(mmc, &sts_cmd, 0);
1334
1335 if (sts_cmd.error ||
1336 (R1_CURRENT_STATE(sts_cmd.resp[0])
1337 != R1_STATE_TRAN)) {
1338 sts_retry--;
1339					/*
1340					 * Wait for at least 146 MCLK cycles for
1341					 * the card to move to the TRANS state.
1342					 * As the MCLK is at least 200MHz during
1343					 * tuning, a maximum 0.73us delay is
1344					 * needed; to be safe, 1ms is given.
1345					 */
1346 usleep_range(1000, 1200);
1347 pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
1348 mmc_hostname(mmc), phase,
1349 sts_cmd.error, sts_cmd.resp[0]);
1350 continue;
1351 }
1352 break;
1353			}
1354 }
1355
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001356 if (!cmd.error && !data.error &&
1357 !memcmp(data_buf, tuning_block_pattern, size)) {
1358 /* tuning is successful at this tuning point */
1359 tuned_phases[tuned_phase_cnt++] = phase;
Krishna Konda96e6b112013-10-28 15:25:03 -07001360 pr_debug("%s: %s: found *** good *** phase = %d\n",
1361 mmc_hostname(mmc), __func__, phase);
1362 } else {
Veerabhadrarao Badiganticd78bbb2017-10-17 08:41:01 +05301363			/* Ignore CRC errors that occurred during tuning */
1364 if (cmd.error)
1365 mmc->err_stats[MMC_ERR_CMD_CRC]--;
1366 else if (data.error)
1367 mmc->err_stats[MMC_ERR_DAT_CRC]--;
Krishna Konda96e6b112013-10-28 15:25:03 -07001368 pr_debug("%s: %s: found ## bad ## phase = %d\n",
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001369 mmc_hostname(mmc), __func__, phase);
1370 }
1371 } while (++phase < 16);
1372
Sahitya Tummaladfdb4af2014-04-01 14:29:13 +05301373 if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
1374 card && mmc_card_mmc(card)) {
Krishna Konda96e6b112013-10-28 15:25:03 -07001375 /*
1376 * If all phases pass then its a problem. So change the card's
1377 * drive type to a different value, if supported and repeat
1378 * tuning until at least one phase fails. Then set the original
1379 * drive type back.
1380 *
1381 * If all the phases still pass after trying all possible
1382 * drive types, then one of those 16 phases will be picked.
1383 * This is no different from what was going on before the
1384 * modification to change drive type and retune.
1385 */
1386 pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
1387 tuned_phase_cnt);
1388
1389		/* set drive type to another value; default setting is 0x0 */
1390 while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
Talel Shenhar6f0f3412015-11-08 14:21:31 +02001391 pr_debug("%s: trying different drive strength (%d)\n",
1392 mmc_hostname(mmc), drv_type);
Krishna Konda96e6b112013-10-28 15:25:03 -07001393 if (card->ext_csd.raw_driver_strength &
1394 (1 << drv_type)) {
1395 sdhci_msm_set_mmc_drv_type(host, opcode,
1396 drv_type);
1397 if (!drv_type_changed)
1398 drv_type_changed = true;
1399 goto retry;
1400 }
1401 }
1402 }
1403
1404 /* reset drive type to default (50 ohm) if changed */
1405 if (drv_type_changed)
1406 sdhci_msm_set_mmc_drv_type(host, opcode, 0);
1407
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001408 if (tuned_phase_cnt) {
1409 rc = msm_find_most_appropriate_phase(host, tuned_phases,
1410 tuned_phase_cnt);
1411 if (rc < 0)
1412 goto kfree;
1413 else
1414 phase = (u8)rc;
1415
1416 /*
1417 * Finally set the selected phase in delay
1418 * line hw block.
1419 */
1420 rc = msm_config_cm_dll_phase(host, phase);
1421 if (rc)
1422 goto kfree;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001423 msm_host->saved_tuning_phase = phase;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001424 pr_debug("%s: %s: finally setting the tuning phase to %d\n",
1425 mmc_hostname(mmc), __func__, phase);
1426 } else {
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301427 if (--tuning_seq_cnt)
1428 goto retry;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001429 /* tuning failed */
1430 pr_err("%s: %s: no tuning point found\n",
1431 mmc_hostname(mmc), __func__);
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301432 rc = -EIO;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001433 }
1434
1435kfree:
1436 kfree(data_buf);
1437out:
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001438 spin_lock_irqsave(&host->lock, flags);
1439 if (!rc)
1440 msm_host->tuning_done = true;
1441 spin_unlock_irqrestore(&host->lock, flags);
Sahitya Tummala0523cda2015-11-17 09:24:53 +05301442 msm_host->tuning_in_progress = false;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001443 pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001444 return rc;
1445}
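/*
 * Note (added summary, not part of the original source): the tuning loop
 * above sweeps all 16 DLL phases, sends the tuning block at each phase and
 * compares the received data against the expected pattern. Passing phases
 * are recorded and msm_find_most_appropriate_phase() selects one of them,
 * which is then programmed into the delay line. If every phase passes on
 * an eMMC card, the card's drive strength is changed and the sweep is
 * repeated so that at least one failing phase bounds the passing window.
 */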
1446
Asutosh Das0ef24812012-12-18 16:14:02 +05301447static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1448{
1449 struct sdhci_msm_gpio_data *curr;
1450 int i, ret = 0;
1451
1452 curr = pdata->pin_data->gpio_data;
1453 for (i = 0; i < curr->size; i++) {
1454 if (!gpio_is_valid(curr->gpio[i].no)) {
1455 ret = -EINVAL;
1456 pr_err("%s: Invalid gpio = %d\n", __func__,
1457 curr->gpio[i].no);
1458 goto free_gpios;
1459 }
1460 if (enable) {
1461 ret = gpio_request(curr->gpio[i].no,
1462 curr->gpio[i].name);
1463 if (ret) {
1464 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1465 __func__, curr->gpio[i].no,
1466 curr->gpio[i].name, ret);
1467 goto free_gpios;
1468 }
1469 curr->gpio[i].is_enabled = true;
1470 } else {
1471 gpio_free(curr->gpio[i].no);
1472 curr->gpio[i].is_enabled = false;
1473 }
1474 }
1475 return ret;
1476
1477free_gpios:
1478 for (i--; i >= 0; i--) {
1479 gpio_free(curr->gpio[i].no);
1480 curr->gpio[i].is_enabled = false;
1481 }
1482 return ret;
1483}
1484
Can Guob903ad82017-10-17 13:22:53 +08001485static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
1486 unsigned int clock)
1487{
1488 int ret = 0;
1489
1490 if (clock > 150000000) {
1491 if (pdata->pctrl_data->pins_drv_type_200MHz)
1492 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1493 pdata->pctrl_data->pins_drv_type_200MHz);
1494 } else if (clock > 75000000) {
1495 if (pdata->pctrl_data->pins_drv_type_100MHz)
1496 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1497 pdata->pctrl_data->pins_drv_type_100MHz);
1498 } else if (clock > 400000) {
1499 if (pdata->pctrl_data->pins_drv_type_50MHz)
1500 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1501 pdata->pctrl_data->pins_drv_type_50MHz);
1502 } else {
1503 if (pdata->pctrl_data->pins_drv_type_400KHz)
1504 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1505 pdata->pctrl_data->pins_drv_type_400KHz);
1506 }
1507
1508 return ret;
1509}
1510
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301511static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1512 bool enable)
1513{
1514 int ret = 0;
1515
1516 if (enable)
1517 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1518 pdata->pctrl_data->pins_active);
1519 else
1520 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1521 pdata->pctrl_data->pins_sleep);
1522
1523 if (ret < 0)
1524 pr_err("%s state for pinctrl failed with %d\n",
1525 enable ? "Enabling" : "Disabling", ret);
1526
1527 return ret;
1528}
1529
Asutosh Das0ef24812012-12-18 16:14:02 +05301530static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1531{
1532 int ret = 0;
1533
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301534 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301535 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301536 } else if (pdata->pctrl_data) {
1537 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1538 goto out;
1539 } else if (!pdata->pin_data) {
1540 return 0;
1541 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301542
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301543 if (pdata->pin_data->is_gpio)
1544 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301545out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301546 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301547 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301548
1549 return ret;
1550}
1551
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301552static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1553 u32 **out, int *len, u32 size)
1554{
1555 int ret = 0;
1556 struct device_node *np = dev->of_node;
1557 size_t sz;
1558 u32 *arr = NULL;
1559
1560 if (!of_get_property(np, prop_name, len)) {
1561 ret = -EINVAL;
1562 goto out;
1563 }
1564 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001565 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301566 dev_err(dev, "%s invalid size\n", prop_name);
1567 ret = -EINVAL;
1568 goto out;
1569 }
1570
1571 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1572 if (!arr) {
1573 dev_err(dev, "%s failed allocating memory\n", prop_name);
1574 ret = -ENOMEM;
1575 goto out;
1576 }
1577
1578 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1579 if (ret < 0) {
1580 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1581 goto out;
1582 }
1583 *out = arr;
1584out:
1585 if (ret)
1586 *len = 0;
1587 return ret;
1588}
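/*
 * Illustrative use of sdhci_msm_dt_get_array() above (sketch only; the
 * node name and values below are hypothetical, not taken from any real DT):
 *
 *	sdhc_1: sdhci@f9824900 {
 *		qcom,clk-rates = <400000 20000000 25000000 50000000 200000000>;
 *	};
 *
 * would be read with:
 *
 *	u32 *tbl; int len;
 *	ret = sdhci_msm_dt_get_array(dev, "qcom,clk-rates", &tbl, &len, 0);
 *	// on success, len == 5 and tbl[] holds the five rates in Hz
 */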
1589
Asutosh Das0ef24812012-12-18 16:14:02 +05301590#define MAX_PROP_SIZE 32
1591static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1592 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1593{
1594 int len, ret = 0;
1595 const __be32 *prop;
1596 char prop_name[MAX_PROP_SIZE];
1597 struct sdhci_msm_reg_data *vreg;
1598 struct device_node *np = dev->of_node;
1599
1600 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1601 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301602 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301603 return ret;
1604 }
1605
1606 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1607 if (!vreg) {
1608 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1609 ret = -ENOMEM;
1610 return ret;
1611 }
1612
1613 vreg->name = vreg_name;
1614
1615 snprintf(prop_name, MAX_PROP_SIZE,
1616 "qcom,%s-always-on", vreg_name);
1617 if (of_get_property(np, prop_name, NULL))
1618 vreg->is_always_on = true;
1619
1620 snprintf(prop_name, MAX_PROP_SIZE,
1621 "qcom,%s-lpm-sup", vreg_name);
1622 if (of_get_property(np, prop_name, NULL))
1623 vreg->lpm_sup = true;
1624
1625 snprintf(prop_name, MAX_PROP_SIZE,
1626 "qcom,%s-voltage-level", vreg_name);
1627 prop = of_get_property(np, prop_name, &len);
1628 if (!prop || (len != (2 * sizeof(__be32)))) {
1629 dev_warn(dev, "%s %s property\n",
1630 prop ? "invalid format" : "no", prop_name);
1631 } else {
1632 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1633 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1634 }
1635
1636 snprintf(prop_name, MAX_PROP_SIZE,
1637 "qcom,%s-current-level", vreg_name);
1638 prop = of_get_property(np, prop_name, &len);
1639 if (!prop || (len != (2 * sizeof(__be32)))) {
1640 dev_warn(dev, "%s %s property\n",
1641 prop ? "invalid format" : "no", prop_name);
1642 } else {
1643 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1644 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1645 }
1646
1647 *vreg_data = vreg;
1648 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1649 vreg->name, vreg->is_always_on ? "always_on," : "",
1650 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1651 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1652
1653 return ret;
1654}
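/*
 * Example (values and phandle are hypothetical) of the regulator properties
 * parsed by sdhci_msm_dt_parse_vreg_info() above for vreg_name = "vdd":
 *
 *	vdd-supply = <&pm_l20>;
 *	qcom,vdd-always-on;
 *	qcom,vdd-lpm-sup;
 *	qcom,vdd-voltage-level = <2950000 2950000>;	[low high] in uV
 *	qcom,vdd-current-level = <200 800000>;		[lpm hpm] in uA
 */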
1655
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301656static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1657 struct sdhci_msm_pltfm_data *pdata)
1658{
1659 struct sdhci_pinctrl_data *pctrl_data;
1660 struct pinctrl *pctrl;
1661 int ret = 0;
1662
1663 /* Try to obtain pinctrl handle */
1664 pctrl = devm_pinctrl_get(dev);
1665 if (IS_ERR(pctrl)) {
1666 ret = PTR_ERR(pctrl);
1667 goto out;
1668 }
1669 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1670 if (!pctrl_data) {
1671 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1672 ret = -ENOMEM;
1673 goto out;
1674 }
1675 pctrl_data->pctrl = pctrl;
1676 /* Look-up and keep the states handy to be used later */
1677 pctrl_data->pins_active = pinctrl_lookup_state(
1678 pctrl_data->pctrl, "active");
1679 if (IS_ERR(pctrl_data->pins_active)) {
1680 ret = PTR_ERR(pctrl_data->pins_active);
1681 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1682 goto out;
1683 }
1684 pctrl_data->pins_sleep = pinctrl_lookup_state(
1685 pctrl_data->pctrl, "sleep");
1686 if (IS_ERR(pctrl_data->pins_sleep)) {
1687 ret = PTR_ERR(pctrl_data->pins_sleep);
1688 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1689 goto out;
1690 }
Can Guob903ad82017-10-17 13:22:53 +08001691
1692 pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
1693 pctrl_data->pctrl, "ds_400KHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301694 if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001695 dev_dbg(dev, "Could not get 400K pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301696 pctrl_data->pins_drv_type_400KHz = NULL;
1697 }
Can Guob903ad82017-10-17 13:22:53 +08001698
1699 pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
1700 pctrl_data->pctrl, "ds_50MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301701 if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001702 dev_dbg(dev, "Could not get 50M pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301703 pctrl_data->pins_drv_type_50MHz = NULL;
1704 }
Can Guob903ad82017-10-17 13:22:53 +08001705
1706 pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
1707 pctrl_data->pctrl, "ds_100MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301708 if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001709 dev_dbg(dev, "Could not get 100M pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301710 pctrl_data->pins_drv_type_100MHz = NULL;
1711 }
Can Guob903ad82017-10-17 13:22:53 +08001712
1713 pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
1714 pctrl_data->pctrl, "ds_200MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301715 if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001716 dev_dbg(dev, "Could not get 200M pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301717 pctrl_data->pins_drv_type_200MHz = NULL;
1718 }
Can Guob903ad82017-10-17 13:22:53 +08001719
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301720 pdata->pctrl_data = pctrl_data;
1721out:
1722 return ret;
1723}
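/*
 * sdhci_msm_parse_pinctrl_info() above looks up the pinctrl states by name.
 * A DT node using it would look roughly like this (sketch; the pin group
 * labels are hypothetical):
 *
 *	pinctrl-names = "active", "sleep", "ds_400KHz", "ds_50MHz",
 *			"ds_100MHz", "ds_200MHz";
 *	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
 *	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 *
 * Only "active" and "sleep" are mandatory; the ds_* drive-strength states
 * are optional and are selected at runtime by
 * sdhci_msm_config_pinctrl_drv_type() based on the requested clock.
 */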
1724
Asutosh Das0ef24812012-12-18 16:14:02 +05301725#define GPIO_NAME_MAX_LEN 32
1726static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1727 struct sdhci_msm_pltfm_data *pdata)
1728{
1729 int ret = 0, cnt, i;
1730 struct sdhci_msm_pin_data *pin_data;
1731 struct device_node *np = dev->of_node;
1732
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301733 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1734 if (!ret) {
1735 goto out;
1736 } else if (ret == -EPROBE_DEFER) {
1737 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1738 goto out;
1739 } else {
1740 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1741 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301742 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301743 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301744 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1745 if (!pin_data) {
1746 dev_err(dev, "No memory for pin_data\n");
1747 ret = -ENOMEM;
1748 goto out;
1749 }
1750
1751 cnt = of_gpio_count(np);
1752 if (cnt > 0) {
1753 pin_data->gpio_data = devm_kzalloc(dev,
1754 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1755 if (!pin_data->gpio_data) {
1756 dev_err(dev, "No memory for gpio_data\n");
1757 ret = -ENOMEM;
1758 goto out;
1759 }
1760 pin_data->gpio_data->size = cnt;
1761 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1762 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1763
1764 if (!pin_data->gpio_data->gpio) {
1765 dev_err(dev, "No memory for gpio\n");
1766 ret = -ENOMEM;
1767 goto out;
1768 }
1769
1770 for (i = 0; i < cnt; i++) {
1771 const char *name = NULL;
1772 char result[GPIO_NAME_MAX_LEN];
1773 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1774 of_property_read_string_index(np,
1775 "qcom,gpio-names", i, &name);
1776
1777 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1778 dev_name(dev), name ? name : "?");
1779 pin_data->gpio_data->gpio[i].name = result;
1780 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1781 pin_data->gpio_data->gpio[i].name,
1782 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301783 }
1784 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301785 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301786out:
1787 if (ret)
1788 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1789 return ret;
1790}
1791
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001792#ifdef CONFIG_SMP
1793static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1794{
1795 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1796}
1797#else
1798static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1799#endif
1800
Gilad Bronerc788a672015-09-08 15:39:11 +03001801static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1802 struct sdhci_msm_pltfm_data *pdata)
1803{
1804 struct device_node *np = dev->of_node;
1805 const char *str;
1806 u32 cpu;
1807 int ret = 0;
1808 int i;
1809
1810 pdata->pm_qos_data.irq_valid = false;
1811 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1812 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1813 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001814 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001815 }
1816
1817 /* must specify cpu for "affine_cores" type */
1818 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1819 pdata->pm_qos_data.irq_cpu = -1;
1820 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1821 if (ret) {
1822 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1823 ret);
1824 goto out;
1825 }
1826 if (cpu < 0 || cpu >= num_possible_cpus()) {
1827 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1828 __func__, cpu, num_possible_cpus());
1829 ret = -EINVAL;
1830 goto out;
1831 }
1832 pdata->pm_qos_data.irq_cpu = cpu;
1833 }
1834
1835 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1836 SDHCI_POWER_POLICY_NUM) {
1837 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1838 __func__, SDHCI_POWER_POLICY_NUM);
1839 ret = -EINVAL;
1840 goto out;
1841 }
1842
1843 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1844 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1845 &pdata->pm_qos_data.irq_latency.latency[i]);
1846
1847 pdata->pm_qos_data.irq_valid = true;
1848out:
1849 return ret;
1850}
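/*
 * Example (values are illustrative only) of the properties consumed by
 * sdhci_msm_pm_qos_parse_irq() above:
 *
 *	qcom,pm-qos-irq-type = "affine_cores";
 *	qcom,pm-qos-irq-cpu = <0>;
 *	qcom,pm-qos-irq-latency = <70 70>;
 *
 * "affine_irq" may be used instead of "affine_cores" (on SMP builds), in
 * which case the cpu property is not required. The latency list must have
 * exactly SDHCI_POWER_POLICY_NUM entries; two are shown here on the
 * assumption that SDHCI_POWER_POLICY_NUM is 2.
 */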
1851
1852static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1853 struct sdhci_msm_pltfm_data *pdata)
1854{
1855 struct device_node *np = dev->of_node;
1856 u32 mask;
1857 int nr_groups;
1858 int ret;
1859 int i;
1860
1861 /* Read cpu group mapping */
1862 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1863 if (nr_groups <= 0) {
1864 ret = -EINVAL;
1865 goto out;
1866 }
1867 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1868 pdata->pm_qos_data.cpu_group_map.mask =
1869 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1870 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1871 ret = -ENOMEM;
1872 goto out;
1873 }
1874
1875 for (i = 0; i < nr_groups; i++) {
1876 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1877 i, &mask);
1878
1879 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1880 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1881 cpu_possible_mask)) {
1882 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1883 __func__, mask, i);
1884 ret = -EINVAL;
1885 goto free_res;
1886 }
1887 }
1888 return 0;
1889
1890free_res:
1891 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1892out:
1893 return ret;
1894}
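/*
 * Example (illustrative) of the cpu-group mapping parsed above. Each u32 is
 * a bitmask of CPUs forming one PM QoS voting group, and every mask must be
 * a subset of the possible CPUs:
 *
 *	qcom,pm-qos-cpu-groups = <0x03 0x0c>;	(CPUs 0-1 and CPUs 2-3)
 */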
1895
1896static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1897 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1898{
1899 struct device_node *np = dev->of_node;
1900 struct sdhci_msm_pm_qos_latency *values;
1901 int ret;
1902 int i;
1903 int group;
1904 int cfg;
1905
1906 ret = of_property_count_u32_elems(np, name);
1907 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1908 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1909 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1910 ret);
1911 return -EINVAL;
1912 } else if (ret < 0) {
1913 return ret;
1914 }
1915
1916 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1917 GFP_KERNEL);
1918 if (!values)
1919 return -ENOMEM;
1920
1921 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1922 group = i / SDHCI_POWER_POLICY_NUM;
1923 cfg = i % SDHCI_POWER_POLICY_NUM;
1924 of_property_read_u32_index(np, name, i,
1925 &(values[group].latency[cfg]));
1926 }
1927
1928 *latency = values;
1929 return 0;
1930}
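/*
 * Example (illustrative) of a latency table parsed by
 * sdhci_msm_pm_qos_parse_latency() above. The property must carry
 * SDHCI_POWER_POLICY_NUM values per cpu group; assuming (hypothetically)
 * SDHCI_POWER_POLICY_NUM == 2 and the two groups from the example above:
 *
 *	qcom,pm-qos-legacy-latency-us = <70 300 70 300>;
 *	qcom,pm-qos-cmdq-latency-us = <70 300 70 300>;
 */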
1931
1932static void sdhci_msm_pm_qos_parse(struct device *dev,
1933 struct sdhci_msm_pltfm_data *pdata)
1934{
1935 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1936 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1937 __func__);
1938
1939 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1940 pdata->pm_qos_data.cmdq_valid =
1941 !sdhci_msm_pm_qos_parse_latency(dev,
1942 "qcom,pm-qos-cmdq-latency-us",
1943 pdata->pm_qos_data.cpu_group_map.nr_groups,
1944 &pdata->pm_qos_data.cmdq_latency);
1945 pdata->pm_qos_data.legacy_valid =
1946 !sdhci_msm_pm_qos_parse_latency(dev,
1947 "qcom,pm-qos-legacy-latency-us",
1948 pdata->pm_qos_data.cpu_group_map.nr_groups,
1949 &pdata->pm_qos_data.latency);
1950 if (!pdata->pm_qos_data.cmdq_valid &&
1951 !pdata->pm_qos_data.legacy_valid) {
1952 /* clean-up previously allocated arrays */
1953 kfree(pdata->pm_qos_data.latency);
1954 kfree(pdata->pm_qos_data.cmdq_latency);
1955 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1956 __func__);
1957 }
1958 } else {
1959 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1960 __func__);
1961 }
1962}
1963
Asutosh Das1c43b132018-01-11 18:08:40 +05301964#ifdef CONFIG_NVMEM
1965/* Parse qfprom data for deciding on errata work-arounds */
1966static long qfprom_read(struct device *dev, const char *name)
1967{
1968 struct nvmem_cell *cell;
1969 ssize_t len = 0;
1970 u32 *buf, val = 0;
1971 long err = 0;
1972
1973 cell = nvmem_cell_get(dev, name);
1974 if (IS_ERR(cell)) {
1975 err = PTR_ERR(cell);
1976 dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
1977 /* If entry does not exist, then that is not an error */
1978 if (err == -ENOENT)
1979 err = 0;
1980 return err;
1981 }
1982
1983 buf = (u32 *)nvmem_cell_read(cell, &len);
1984 if (IS_ERR(buf) || !len) {
1985		dev_err(dev, "Failed reading nvmem cell, err: %ld, bytes fetched: %zd\n",
1986				IS_ERR(buf) ? PTR_ERR(buf) : 0L, len);
1987 if (!IS_ERR(buf)) {
1988 kfree(buf);
1989 err = -EINVAL;
1990 } else {
1991 err = PTR_ERR(buf);
1992 }
1993 } else {
Asutosh Dasb8614aa2018-01-31 15:44:15 +05301994 /*
1995		 * The cell is read as 30 bits starting at bit offset 0;
1996		 * only bits 29:28 are of interest here.
1997 */
1998 val = (*buf >> 28) & 0x3;
Asutosh Das1c43b132018-01-11 18:08:40 +05301999 kfree(buf);
2000 }
2001
2002 nvmem_cell_put(cell);
2003 return err ? err : (long) val;
2004}
2005
2006/* Reads the SoC version */
2007static int sdhci_msm_get_socrev(struct device *dev,
2008 struct sdhci_msm_host *msm_host)
2009{
2010
2011 msm_host->soc_min_rev = qfprom_read(dev, "minor_rev");
2012
2013 if (msm_host->soc_min_rev < 0)
2014 dev_err(dev, "failed getting soc_min_rev, err : %d\n",
2015 msm_host->soc_min_rev);
2016 return msm_host->soc_min_rev;
2017}
2018#else
2019/* Reads the SoC version */
2020static int sdhci_msm_get_socrev(struct device *dev,
2021 struct sdhci_msm_host *msm_host)
2022{
2023 return 0;
2024}
2025#endif
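/*
 * qfprom_read() above fetches the nvmem cell by name, so the device node is
 * expected to reference it through the standard nvmem consumer binding,
 * roughly as follows (the cell label is hypothetical):
 *
 *	nvmem-cells = <&minor_rev>;
 *	nvmem-cell-names = "minor_rev";
 *
 * The cell name "minor_rev" matches the lookup done in
 * sdhci_msm_get_socrev().
 */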
2026
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07002027static int sdhci_msm_dt_parse_hsr_info(struct device *dev,
2028 struct sdhci_msm_host *msm_host)
2029
2030{
2031 u32 *dll_hsr_table = NULL;
2032 int dll_hsr_table_len, dll_hsr_reg_count;
2033 int ret = 0;
2034
2035 if (sdhci_msm_dt_get_array(dev, "qcom,dll-hsr-list",
2036 &dll_hsr_table, &dll_hsr_table_len, 0))
2037 goto skip_hsr;
2038
2039 dll_hsr_reg_count = sizeof(struct sdhci_msm_dll_hsr) / sizeof(u32);
2040 if (dll_hsr_table_len != dll_hsr_reg_count) {
2041		dev_err(dev, "Number of HSR entries does not match\n");
2042 ret = -EINVAL;
2043 } else {
2044 msm_host->dll_hsr = (struct sdhci_msm_dll_hsr *)dll_hsr_table;
2045 }
2046
2047skip_hsr:
2048 if (!msm_host->dll_hsr)
2049 dev_info(dev, "Failed to get dll hsr settings from dt\n");
2050 return ret;
2051}
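/*
 * Example (illustrative) of the HSR settings parsed above. The list must
 * contain exactly sizeof(struct sdhci_msm_dll_hsr)/sizeof(u32) register
 * values; the placeholder entries below are made up, and the real count
 * must equal the number of u32 fields in struct sdhci_msm_dll_hsr:
 *
 *	qcom,dll-hsr-list = <0x0 0x0 0x0 0x0 0x0>;
 */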
2052
Asutosh Das0ef24812012-12-18 16:14:02 +05302053/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02002054static
2055struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
2056 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05302057{
2058 struct sdhci_msm_pltfm_data *pdata = NULL;
2059 struct device_node *np = dev->of_node;
2060 u32 bus_width = 0;
2061 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302062 int clk_table_len;
2063 u32 *clk_table = NULL;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302064 int ice_clk_table_len;
2065 u32 *ice_clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05302066 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05302067 const char *lower_bus_speed = NULL;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05302068 int bus_clk_table_len;
2069 u32 *bus_clk_table = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302070
2071 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2072 if (!pdata) {
2073 dev_err(dev, "failed to allocate memory for platform data\n");
2074 goto out;
2075 }
2076
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05302077 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
Bao D. Nguyen0f5ac952017-06-14 12:42:41 -07002078 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05302079 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05302080
Asutosh Das0ef24812012-12-18 16:14:02 +05302081 of_property_read_u32(np, "qcom,bus-width", &bus_width);
2082 if (bus_width == 8)
2083 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
2084 else if (bus_width == 4)
2085 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
2086 else {
2087 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
2088 pdata->mmc_bus_width = 0;
2089 }
2090
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002091 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05302092 &msm_host->mmc->clk_scaling.pltfm_freq_table,
2093 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002094 pr_debug("%s: no clock scaling frequencies were supplied\n",
2095 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05302096 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
2097 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
2098 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002099
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05302100 /*
2101	 * A few hosts can support DDR52 mode at the same lower
2102	 * system voltage corner as high-speed mode. In such cases,
2103	 * it is always better to use DDR mode, which improves
2104	 * performance without any power impact.
2105 */
2106 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
2107 &lower_bus_speed)) {
2108 if (!strcmp(lower_bus_speed, "DDR52"))
2109 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
2110 MMC_SCALING_LOWER_DDR52_MODE;
2111 }
2112
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302113 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
2114 &clk_table, &clk_table_len, 0)) {
2115 dev_err(dev, "failed parsing supported clock rates\n");
2116 goto out;
2117 }
2118 if (!clk_table || !clk_table_len) {
2119 dev_err(dev, "Invalid clock table\n");
2120 goto out;
2121 }
2122 pdata->sup_clk_table = clk_table;
2123 pdata->sup_clk_cnt = clk_table_len;
2124
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05302125 if (!sdhci_msm_dt_get_array(dev, "qcom,bus-aggr-clk-rates",
2126 &bus_clk_table, &bus_clk_table_len, 0)) {
2127 if (bus_clk_table && bus_clk_table_len) {
2128 pdata->bus_clk_table = bus_clk_table;
2129 pdata->bus_clk_cnt = bus_clk_table_len;
2130 }
2131 }
2132
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302133 if (msm_host->ice.pdev) {
2134 if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
2135 &ice_clk_table, &ice_clk_table_len, 0)) {
2136 dev_err(dev, "failed parsing supported ice clock rates\n");
2137 goto out;
2138 }
2139 if (!ice_clk_table || !ice_clk_table_len) {
2140 dev_err(dev, "Invalid clock table\n");
2141 goto out;
2142 }
Sahitya Tummala073ca552015-08-06 13:59:37 +05302143 if (ice_clk_table_len != 2) {
2144 dev_err(dev, "Need max and min frequencies in the table\n");
2145 goto out;
2146 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302147 pdata->sup_ice_clk_table = ice_clk_table;
2148 pdata->sup_ice_clk_cnt = ice_clk_table_len;
Sahitya Tummala073ca552015-08-06 13:59:37 +05302149 pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
2150 pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
2151 dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
2152 pdata->ice_clk_max, pdata->ice_clk_min);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302153 }
2154
Asutosh Das0ef24812012-12-18 16:14:02 +05302155 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
2156 sdhci_msm_slot_reg_data),
2157 GFP_KERNEL);
2158 if (!pdata->vreg_data) {
2159 dev_err(dev, "failed to allocate memory for vreg data\n");
2160 goto out;
2161 }
2162
2163 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
2164 "vdd")) {
2165 dev_err(dev, "failed parsing vdd data\n");
2166 goto out;
2167 }
2168 if (sdhci_msm_dt_parse_vreg_info(dev,
2169 &pdata->vreg_data->vdd_io_data,
2170 "vdd-io")) {
2171 dev_err(dev, "failed parsing vdd-io data\n");
2172 goto out;
2173 }
2174
2175 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
2176 dev_err(dev, "failed parsing gpio data\n");
2177 goto out;
2178 }
2179
Asutosh Das0ef24812012-12-18 16:14:02 +05302180 len = of_property_count_strings(np, "qcom,bus-speed-mode");
2181
2182 for (i = 0; i < len; i++) {
2183 const char *name = NULL;
2184
2185 of_property_read_string_index(np,
2186 "qcom,bus-speed-mode", i, &name);
2187 if (!name)
2188 continue;
2189
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002190 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
2191 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
2192 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
2193 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
2194 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05302195 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2196 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
2197 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2198 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
2199 pdata->caps |= MMC_CAP_1_8V_DDR
2200 | MMC_CAP_UHS_DDR50;
2201 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
2202 pdata->caps |= MMC_CAP_1_2V_DDR
2203 | MMC_CAP_UHS_DDR50;
2204 }
2205
2206 if (of_get_property(np, "qcom,nonremovable", NULL))
2207 pdata->nonremovable = true;
2208
Guoping Yuf7c91332014-08-20 16:56:18 +08002209 if (of_get_property(np, "qcom,nonhotplug", NULL))
2210 pdata->nonhotplug = true;
2211
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08002212 pdata->largeaddressbus =
2213 of_property_read_bool(np, "qcom,large-address-bus");
2214
Dov Levenglickc9033ab2015-03-10 16:00:56 +02002215 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
2216 msm_host->mmc->wakeup_on_idle = true;
2217
Gilad Bronerc788a672015-09-08 15:39:11 +03002218 sdhci_msm_pm_qos_parse(dev, pdata);
2219
Pavan Anamula5a256df2015-10-16 14:38:28 +05302220 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302221 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05302222
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002223 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002224 msm_host->regs_restore.is_supported =
2225 of_property_read_bool(np, "qcom,restore-after-cx-collapse");
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002226
Vijay Viswanatha5492612017-10-17 15:38:55 +05302227 if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
2228 pdata->rclk_wa = true;
2229
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07002230 if (sdhci_msm_dt_parse_hsr_info(dev, msm_host))
2231 goto out;
2232
Asutosh Das1c43b132018-01-11 18:08:40 +05302233 /*
2234 * rclk_wa is not required if soc version is mentioned and
2235 * is not base version.
2236 */
2237 if (msm_host->soc_min_rev != 0)
2238 pdata->rclk_wa = false;
2239
Asutosh Das0ef24812012-12-18 16:14:02 +05302240 return pdata;
2241out:
2242 return NULL;
2243}
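/*
 * Sketch (all values and phandles hypothetical) of a slot node carrying
 * some of the properties parsed by sdhci_msm_populate_pdata() above:
 *
 *	qcom,bus-width = <4>;
 *	qcom,ice-clk-rates = <300000000 75000000>;	[max min]
 *	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 *	qcom,nonremovable;
 *	cd-gpios = <&tlmm 54 0x1>;
 */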
2244
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302245/* Returns required bandwidth in Bytes per Sec */
2246static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
2247 struct mmc_ios *ios)
2248{
Sahitya Tummala2886c922013-04-03 18:03:31 +05302249 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2250 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2251
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302252 unsigned int bw;
2253
Sahitya Tummala2886c922013-04-03 18:03:31 +05302254 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302255 /*
2256	 * For DDR mode, the SDCC controller clock runs at double
2257	 * the rate of the actual clock that goes to the card.
2258 */
2259 if (ios->bus_width == MMC_BUS_WIDTH_4)
2260 bw /= 2;
2261 else if (ios->bus_width == MMC_BUS_WIDTH_1)
2262 bw /= 8;
2263
2264 return bw;
2265}
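/*
 * Worked example (informal) for sdhci_get_bw_required() above: with
 * clk_rate = 200000000 and a 4-bit bus the function requests
 * 200000000 / 2 = 100000000 Bytes/s; with a 1-bit bus it would request
 * 200000000 / 8 = 25000000 Bytes/s. An 8-bit bus uses the controller
 * clock rate unchanged.
 */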
2266
2267static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
2268 unsigned int bw)
2269{
2270 unsigned int *table = host->pdata->voting_data->bw_vecs;
2271 unsigned int size = host->pdata->voting_data->bw_vecs_size;
2272 int i;
2273
2274 if (host->msm_bus_vote.is_max_bw_needed && bw)
2275 return host->msm_bus_vote.max_bw_vote;
2276
2277 for (i = 0; i < size; i++) {
2278 if (bw <= table[i])
2279 break;
2280 }
2281
2282 if (i && (i == size))
2283 i--;
2284
2285 return i;
2286}
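/*
 * Illustration (values are hypothetical) of how the vote index is derived
 * above. With:
 *
 *	qcom,bus-bw-vectors-bps = <0 400000 20000000 100000000 200000000>;
 *
 * a requested bandwidth of 25000000 Bytes/s maps to index 3 (the first
 * entry >= bw), while anything above the last entry is clamped to the
 * final index.
 */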
2287
2288/*
2289 * This function must be called with host lock acquired.
2290 * Caller of this function should also ensure that msm bus client
2291 * handle is not null.
2292 */
2293static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
2294 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302295 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302296{
2297 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2298 int rc = 0;
2299
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302300 BUG_ON(!flags);
2301
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302302 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302303 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302304 rc = msm_bus_scale_client_update_request(
2305 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302306 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302307 if (rc) {
2308 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2309 mmc_hostname(host->mmc),
2310 msm_host->msm_bus_vote.client_handle, vote, rc);
2311 goto out;
2312 }
2313 msm_host->msm_bus_vote.curr_vote = vote;
2314 }
2315out:
2316 return rc;
2317}
2318
2319/*
2320 * Internal work. Work to set 0 bandwidth for msm bus.
2321 */
2322static void sdhci_msm_bus_work(struct work_struct *work)
2323{
2324 struct sdhci_msm_host *msm_host;
2325 struct sdhci_host *host;
2326 unsigned long flags;
2327
2328 msm_host = container_of(work, struct sdhci_msm_host,
2329 msm_bus_vote.vote_work.work);
2330 host = platform_get_drvdata(msm_host->pdev);
2331
2332 if (!msm_host->msm_bus_vote.client_handle)
2333 return;
2334
2335 spin_lock_irqsave(&host->lock, flags);
2336 /* don't vote for 0 bandwidth if any request is in progress */
2337 if (!host->mrq) {
2338 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302339 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302340 } else
2341		pr_warning("%s: %s: Transfer in progress. Skipping bus voting to 0 bandwidth\n",
2342 mmc_hostname(host->mmc), __func__);
2343 spin_unlock_irqrestore(&host->lock, flags);
2344}
2345
2346/*
2347 * This function cancels any scheduled delayed work and sets the bus
2348 * vote based on bw (bandwidth) argument.
2349 */
2350static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2351 unsigned int bw)
2352{
2353 int vote;
2354 unsigned long flags;
2355 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2356 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2357
2358 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2359 spin_lock_irqsave(&host->lock, flags);
2360 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302361 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302362 spin_unlock_irqrestore(&host->lock, flags);
2363}
2364
2365#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2366
2367/* This function queues a work item which will set the bandwidth requirement to 0 */
2368static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2369{
2370 unsigned long flags;
2371 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2372 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2373
2374 spin_lock_irqsave(&host->lock, flags);
2375 if (msm_host->msm_bus_vote.min_bw_vote !=
2376 msm_host->msm_bus_vote.curr_vote)
2377 queue_delayed_work(system_wq,
2378 &msm_host->msm_bus_vote.vote_work,
2379 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2380 spin_unlock_irqrestore(&host->lock, flags);
2381}
2382
2383static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2384 struct platform_device *pdev)
2385{
2386 int rc = 0;
2387 struct msm_bus_scale_pdata *bus_pdata;
2388
2389 struct sdhci_msm_bus_voting_data *data;
2390 struct device *dev = &pdev->dev;
2391
2392 data = devm_kzalloc(dev,
2393 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2394 if (!data) {
2395 dev_err(&pdev->dev,
2396 "%s: failed to allocate memory\n", __func__);
2397 rc = -ENOMEM;
2398 goto out;
2399 }
2400 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2401 if (data->bus_pdata) {
2402 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2403 &data->bw_vecs, &data->bw_vecs_size, 0);
2404 if (rc) {
2405 dev_err(&pdev->dev,
2406 "%s: Failed to get bus-bw-vectors-bps\n",
2407 __func__);
2408 goto out;
2409 }
2410 host->pdata->voting_data = data;
2411 }
2412 if (host->pdata->voting_data &&
2413 host->pdata->voting_data->bus_pdata &&
2414 host->pdata->voting_data->bw_vecs &&
2415 host->pdata->voting_data->bw_vecs_size) {
2416
2417 bus_pdata = host->pdata->voting_data->bus_pdata;
2418 host->msm_bus_vote.client_handle =
2419 msm_bus_scale_register_client(bus_pdata);
2420 if (!host->msm_bus_vote.client_handle) {
2421			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2422 rc = -EFAULT;
2423 goto out;
2424 }
2425 /* cache the vote index for minimum and maximum bandwidth */
2426 host->msm_bus_vote.min_bw_vote =
2427 sdhci_msm_bus_get_vote_for_bw(host, 0);
2428 host->msm_bus_vote.max_bw_vote =
2429 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2430 } else {
2431 devm_kfree(dev, data);
2432 }
2433
2434out:
2435 return rc;
2436}
2437
2438static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2439{
2440 if (host->msm_bus_vote.client_handle)
2441 msm_bus_scale_unregister_client(
2442 host->msm_bus_vote.client_handle);
2443}
2444
2445static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2446{
2447 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2448 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2449 struct mmc_ios *ios = &host->mmc->ios;
2450 unsigned int bw;
2451
2452 if (!msm_host->msm_bus_vote.client_handle)
2453 return;
2454
2455 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302456 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302457 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302458 } else {
2459 /*
2460 * If clock gating is enabled, then remove the vote
2461 * immediately because clocks will be disabled only
2462 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2463 * additional delay is required to remove the bus vote.
2464 */
2465#ifdef CONFIG_MMC_CLKGATE
2466 if (host->mmc->clkgate_delay)
2467 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2468 else
2469#endif
2470 sdhci_msm_bus_queue_work(host);
2471 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302472}
2473
Asutosh Das0ef24812012-12-18 16:14:02 +05302474/* Regulator utility functions */
2475static int sdhci_msm_vreg_init_reg(struct device *dev,
2476 struct sdhci_msm_reg_data *vreg)
2477{
2478 int ret = 0;
2479
2480 /* check if regulator is already initialized? */
2481 if (vreg->reg)
2482 goto out;
2483
2484 /* Get the regulator handle */
2485 vreg->reg = devm_regulator_get(dev, vreg->name);
2486 if (IS_ERR(vreg->reg)) {
2487 ret = PTR_ERR(vreg->reg);
2488 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2489 __func__, vreg->name, ret);
2490 goto out;
2491 }
2492
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302493 if (regulator_count_voltages(vreg->reg) > 0) {
2494 vreg->set_voltage_sup = true;
2495 /* sanity check */
2496 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2497 pr_err("%s: %s invalid constraints specified\n",
2498 __func__, vreg->name);
2499 ret = -EINVAL;
2500 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302501 }
2502
2503out:
2504 return ret;
2505}
2506
2507static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2508{
2509 if (vreg->reg)
2510 devm_regulator_put(vreg->reg);
2511}
2512
2513static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2514 *vreg, int uA_load)
2515{
2516 int ret = 0;
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07002517
Asutosh Das0ef24812012-12-18 16:14:02 +05302518 /*
2519 * regulators that do not support regulator_set_voltage also
2520 * do not support regulator_set_optimum_mode
2521 */
2522 if (vreg->set_voltage_sup) {
2523 ret = regulator_set_load(vreg->reg, uA_load);
2524 if (ret < 0)
2525 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2526 __func__, vreg->name, uA_load, ret);
2527 else
2528 /*
2529 * regulator_set_load() can return non zero
2530 * value even for success case.
2531 */
2532 ret = 0;
2533 }
2534 return ret;
2535}
2536
2537static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2538 int min_uV, int max_uV)
2539{
2540 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302541 if (vreg->set_voltage_sup) {
2542 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2543 if (ret) {
2544			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d, max_uV=%d, ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302545 __func__, vreg->name, min_uV, max_uV, ret);
2546 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302547 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302548
2549 return ret;
2550}
2551
2552static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2553{
2554 int ret = 0;
2555
2556 /* Put regulator in HPM (high power mode) */
2557 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2558 if (ret < 0)
2559 return ret;
2560
2561 if (!vreg->is_enabled) {
2562 /* Set voltage level */
2563 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2564 vreg->high_vol_level);
2565 if (ret)
2566 return ret;
2567 }
2568 ret = regulator_enable(vreg->reg);
2569 if (ret) {
2570 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2571 __func__, vreg->name, ret);
2572 return ret;
2573 }
2574 vreg->is_enabled = true;
2575 return ret;
2576}
2577
2578static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2579{
2580 int ret = 0;
2581
2582 /* Never disable regulator marked as always_on */
2583 if (vreg->is_enabled && !vreg->is_always_on) {
2584 ret = regulator_disable(vreg->reg);
2585 if (ret) {
2586 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2587 __func__, vreg->name, ret);
2588 goto out;
2589 }
2590 vreg->is_enabled = false;
2591
2592 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2593 if (ret < 0)
2594 goto out;
2595
2596 /* Set min. voltage level to 0 */
2597 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2598 if (ret)
2599 goto out;
2600 } else if (vreg->is_enabled && vreg->is_always_on) {
2601 if (vreg->lpm_sup) {
2602 /* Put always_on regulator in LPM (low power mode) */
2603 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2604 vreg->lpm_uA);
2605 if (ret < 0)
2606 goto out;
2607 }
2608 }
2609out:
2610 return ret;
2611}
2612
2613static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2614 bool enable, bool is_init)
2615{
2616 int ret = 0, i;
2617 struct sdhci_msm_slot_reg_data *curr_slot;
2618 struct sdhci_msm_reg_data *vreg_table[2];
2619
2620 curr_slot = pdata->vreg_data;
2621 if (!curr_slot) {
2622		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2623 __func__);
2624 goto out;
2625 }
2626
2627 vreg_table[0] = curr_slot->vdd_data;
2628 vreg_table[1] = curr_slot->vdd_io_data;
2629
2630 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2631 if (vreg_table[i]) {
2632 if (enable)
2633 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2634 else
2635 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2636 if (ret)
2637 goto out;
2638 }
2639 }
2640out:
2641 return ret;
2642}
2643
Asutosh Das0ef24812012-12-18 16:14:02 +05302644/* This init function should be called only once for each SDHC slot */
2645static int sdhci_msm_vreg_init(struct device *dev,
2646 struct sdhci_msm_pltfm_data *pdata,
2647 bool is_init)
2648{
2649 int ret = 0;
2650 struct sdhci_msm_slot_reg_data *curr_slot;
2651 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2652
2653 curr_slot = pdata->vreg_data;
2654 if (!curr_slot)
2655 goto out;
2656
2657 curr_vdd_reg = curr_slot->vdd_data;
2658 curr_vdd_io_reg = curr_slot->vdd_io_data;
2659
2660 if (!is_init)
2661 /* Deregister all regulators from regulator framework */
2662 goto vdd_io_reg_deinit;
2663
2664 /*
2665 * Get the regulator handle from voltage regulator framework
2666 * and then try to set the voltage level for the regulator
2667 */
2668 if (curr_vdd_reg) {
2669 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2670 if (ret)
2671 goto out;
2672 }
2673 if (curr_vdd_io_reg) {
2674 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2675 if (ret)
2676 goto vdd_reg_deinit;
2677 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302678
Asutosh Das0ef24812012-12-18 16:14:02 +05302679 if (ret)
2680 dev_err(dev, "vreg reset failed (%d)\n", ret);
2681 goto out;
2682
2683vdd_io_reg_deinit:
2684 if (curr_vdd_io_reg)
2685 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2686vdd_reg_deinit:
2687 if (curr_vdd_reg)
2688 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2689out:
2690 return ret;
2691}
2692
2693
2694static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2695 enum vdd_io_level level,
2696 unsigned int voltage_level)
2697{
2698 int ret = 0;
2699 int set_level;
2700 struct sdhci_msm_reg_data *vdd_io_reg;
2701
2702 if (!pdata->vreg_data)
2703 return ret;
2704
2705 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2706 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2707 switch (level) {
2708 case VDD_IO_LOW:
2709 set_level = vdd_io_reg->low_vol_level;
2710 break;
2711 case VDD_IO_HIGH:
2712 set_level = vdd_io_reg->high_vol_level;
2713 break;
2714 case VDD_IO_SET_LEVEL:
2715 set_level = voltage_level;
2716 break;
2717 default:
2718 pr_err("%s: invalid argument level = %d",
2719 __func__, level);
2720 ret = -EINVAL;
2721 return ret;
2722 }
2723 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2724 set_level);
2725 }
2726 return ret;
2727}
2728
Ritesh Harjani42876f42015-11-17 17:46:51 +05302729/*
2730 * Acquire spin-lock host->lock before calling this function
2731 */
2732static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2733 bool enable)
2734{
2735 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2736 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2737
2738 if (enable && !msm_host->is_sdiowakeup_enabled)
2739 enable_irq(msm_host->pdata->sdiowakeup_irq);
2740 else if (!enable && msm_host->is_sdiowakeup_enabled)
2741 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2742 else
2743 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2744 __func__, enable, msm_host->is_sdiowakeup_enabled);
2745 msm_host->is_sdiowakeup_enabled = enable;
2746}
2747
2748static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2749{
2750 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302751 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2752 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2753
Ritesh Harjani42876f42015-11-17 17:46:51 +05302754 unsigned long flags;
2755
2756 pr_debug("%s: irq (%d) received\n", __func__, irq);
2757
2758 spin_lock_irqsave(&host->lock, flags);
2759 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2760 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302761 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302762
2763 return IRQ_HANDLED;
2764}
2765
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302766void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2767{
2768 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2769 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302770 const struct sdhci_msm_offset *msm_host_offset =
2771 msm_host->offset;
Siba Prasad0196fe42017-06-27 15:13:27 +05302772 unsigned int irq_flags = 0;
2773 struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302774
Siba Prasad0196fe42017-06-27 15:13:27 +05302775 if (pwr_irq_desc)
2776 irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
2777 state_use_accessors);
2778
2779 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302780 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302781 sdhci_msm_readl_relaxed(host,
2782 msm_host_offset->CORE_PWRCTL_STATUS),
2783 sdhci_msm_readl_relaxed(host,
2784 msm_host_offset->CORE_PWRCTL_MASK),
2785 sdhci_msm_readl_relaxed(host,
Siba Prasad0196fe42017-06-27 15:13:27 +05302786 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
2787
2788 MMC_TRACE(host->mmc,
2789 "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
2790 __func__,
2791 sdhci_msm_readb_relaxed(host,
2792 msm_host_offset->CORE_PWRCTL_STATUS),
2793 sdhci_msm_readb_relaxed(host,
2794 msm_host_offset->CORE_PWRCTL_MASK),
2795 sdhci_msm_readb_relaxed(host,
2796 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302797}
2798
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002799static int sdhci_msm_clear_pwrctl_status(struct sdhci_host *host, u8 value)
2800{
2801 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2802 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2803 const struct sdhci_msm_offset *msm_host_offset = msm_host->offset;
2804 int ret = 0, retry = 10;
2805
2806 /*
2807 * There is a rare HW scenario where the first clear pulse could be
2808 * lost when actual reset and clear/read of status register is
2809 * happening at a time. Hence, retry for at least 10 times to make
2810 * sure status register is cleared. Otherwise, this will result in
2811 * a spurious power IRQ resulting in system instability.
2812 */
2813 do {
2814 if (retry == 0) {
2815 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2816 mmc_hostname(host->mmc), value);
2817 sdhci_msm_dump_pwr_ctrl_regs(host);
2818 WARN_ON(1);
2819 ret = -EBUSY;
2820 break;
2821 }
2822
2823 /*
2824 * Clear the PWRCTL_STATUS interrupt bits by writing to the
2825 * corresponding bits in the PWRCTL_CLEAR register.
2826 */
2827 sdhci_msm_writeb_relaxed(value, host,
2828 msm_host_offset->CORE_PWRCTL_CLEAR);
2829 /*
2830 * SDHC has core_mem and hc_mem device memory and these memory
2831 * addresses do not fall within 1KB region. Hence, any update
2832 * to core_mem address space would require an mb() to ensure
2833 * this gets completed before its next update to registers
2834 * within hc_mem.
2835 */
2836 mb();
2837 retry--;
2838 udelay(10);
2839 } while (value & sdhci_msm_readb_relaxed(host,
2840 msm_host_offset->CORE_PWRCTL_STATUS));
2841
2842 return ret;
2843}
2844
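/*
 * Informal summary (added, not in the original source) of the power-IRQ
 * handshake implemented below: the controller latches a request in
 * CORE_PWRCTL_STATUS (bus on/off, IO voltage low/high), the handler clears
 * those bits through CORE_PWRCTL_CLEAR, performs the regulator/pinctrl and
 * IO-pad changes, and then acknowledges with the matching SUCCESS/FAIL bits
 * in CORE_PWRCTL_CTL before completing pwr_irq_completion for any waiter.
 */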
Asutosh Das0ef24812012-12-18 16:14:02 +05302845static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2846{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002847 struct sdhci_host *host = (struct sdhci_host *)data;
2848 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2849 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302850 const struct sdhci_msm_offset *msm_host_offset =
2851 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302852 u8 irq_status = 0;
2853 u8 irq_ack = 0;
2854 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302855 int pwr_state = 0, io_level = 0;
2856 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05302857
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302858 irq_status = sdhci_msm_readb_relaxed(host,
2859 msm_host_offset->CORE_PWRCTL_STATUS);
2860
Asutosh Das0ef24812012-12-18 16:14:02 +05302861 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2862 mmc_hostname(msm_host->mmc), irq, irq_status);
2863
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002864 sdhci_msm_clear_pwrctl_status(host, irq_status);
Asutosh Das0ef24812012-12-18 16:14:02 +05302865
2866 /* Handle BUS ON/OFF*/
2867 if (irq_status & CORE_PWRCTL_BUS_ON) {
2868 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302869 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302870 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302871 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2872 VDD_IO_HIGH, 0);
2873 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302874 if (ret)
2875 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2876 else
2877 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302878
2879 pwr_state = REQ_BUS_ON;
2880 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302881 }
2882 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302883 if (msm_host->pltfm_init_done)
2884 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2885 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302886 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302887 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302888 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2889 VDD_IO_LOW, 0);
2890 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302891 if (ret)
2892 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2893 else
2894 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302895
2896 pwr_state = REQ_BUS_OFF;
2897 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302898 }
2899 /* Handle IO LOW/HIGH */
2900 if (irq_status & CORE_PWRCTL_IO_LOW) {
2901 /* Switch voltage Low */
2902 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2903 if (ret)
2904 irq_ack |= CORE_PWRCTL_IO_FAIL;
2905 else
2906 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302907
2908 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302909 }
2910 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2911 /* Switch voltage High */
2912 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2913 if (ret)
2914 irq_ack |= CORE_PWRCTL_IO_FAIL;
2915 else
2916 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302917
2918 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302919 }
2920
2921 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302922 sdhci_msm_writeb_relaxed(irq_ack, host,
2923 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302924 /*
2925 * SDHC has core_mem and hc_mem device memory and these memory
2926 * addresses do not fall within 1KB region. Hence, any update to
2927 * core_mem address space would require an mb() to ensure this gets
2928 * completed before its next update to registers within hc_mem.
2929 */
2930 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302931 if ((io_level & REQ_IO_HIGH) &&
2932 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2933 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302934 writel_relaxed((readl_relaxed(host->ioaddr +
2935 msm_host_offset->CORE_VENDOR_SPEC) &
2936 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2937 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002938 else if ((io_level & REQ_IO_LOW) ||
2939 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302940 writel_relaxed((readl_relaxed(host->ioaddr +
2941 msm_host_offset->CORE_VENDOR_SPEC) |
2942 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2943 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002944 mb();
2945
Asutosh Das0ef24812012-12-18 16:14:02 +05302946 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2947 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302948 spin_lock_irqsave(&host->lock, flags);
2949 if (pwr_state)
2950 msm_host->curr_pwr_state = pwr_state;
2951 if (io_level)
2952 msm_host->curr_io_level = io_level;
2953 complete(&msm_host->pwr_irq_completion);
2954 spin_unlock_irqrestore(&host->lock, flags);
2955
Asutosh Das0ef24812012-12-18 16:14:02 +05302956 return IRQ_HANDLED;
2957}
2958
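/*
 * sysfs "polling" attribute: show whether MMC_CAP_NEEDS_POLL is set; on
 * write, toggle the capability and kick off card re-detection when it is
 * being enabled.
 */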
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302959static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302960show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2961{
2962 struct sdhci_host *host = dev_get_drvdata(dev);
2963 int poll;
2964 unsigned long flags;
2965
2966 spin_lock_irqsave(&host->lock, flags);
2967 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2968 spin_unlock_irqrestore(&host->lock, flags);
2969
2970 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2971}
2972
2973static ssize_t
2974store_polling(struct device *dev, struct device_attribute *attr,
2975 const char *buf, size_t count)
2976{
2977 struct sdhci_host *host = dev_get_drvdata(dev);
2978 int value;
2979 unsigned long flags;
2980
2981 if (!kstrtou32(buf, 0, &value)) {
2982 spin_lock_irqsave(&host->lock, flags);
2983 if (value) {
2984 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2985 mmc_detect_change(host->mmc, 0);
2986 } else {
2987 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2988 }
2989 spin_unlock_irqrestore(&host->lock, flags);
2990 }
2991 return count;
2992}
2993
2994static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302995show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2996 char *buf)
2997{
2998 struct sdhci_host *host = dev_get_drvdata(dev);
2999 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3000 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3001
3002 return snprintf(buf, PAGE_SIZE, "%u\n",
3003 msm_host->msm_bus_vote.is_max_bw_needed);
3004}
3005
3006static ssize_t
3007store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
3008 const char *buf, size_t count)
3009{
3010 struct sdhci_host *host = dev_get_drvdata(dev);
3011 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3012 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3013 uint32_t value;
3014 unsigned long flags;
3015
3016 if (!kstrtou32(buf, 0, &value)) {
3017 spin_lock_irqsave(&host->lock, flags);
3018 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
3019 spin_unlock_irqrestore(&host->lock, flags);
3020 }
3021 return count;
3022}
3023
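/*
 * Wait for the power IRQ to service the given request type (bus ON/OFF or
 * IO LOW/HIGH). Returns early when the request is already reflected in
 * curr_pwr_state/curr_io_level, or when no IRQ can be expected (no
 * switchable signalling voltage, or an IO-high request issued before the
 * controller has been powered up).
 */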
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303024static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05303025{
3026 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3027 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303028 const struct sdhci_msm_offset *msm_host_offset =
3029 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303030 unsigned long flags;
3031 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05303032 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05303033
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303034 spin_lock_irqsave(&host->lock, flags);
3035 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
3036 mmc_hostname(host->mmc), __func__, req_type,
3037 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05303038 if (!msm_host->mci_removed)
3039 io_sig_sts = sdhci_msm_readl_relaxed(host,
3040 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303041
Sahitya Tummala481fbb02013-08-06 15:22:28 +05303042 /*
3043 * The IRQ for request type IO High/Low will be generated when -
3044 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
3045	 * 2. If 1 is true, and there is a state change in the 1.8V enable
3046 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
3047 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
3048 * layer tries to set it to 3.3V before card detection happens, the
3049 * IRQ doesn't get triggered as there is no state change in this bit.
3050 * The driver already handles this case by changing the IO voltage
3051 * level to high as part of controller power up sequence. Hence, check
3052 * for host->pwr to handle a case where IO voltage high request is
3053 * issued even before controller power up.
3054 */
3055 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
3056 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
3057 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
3058 pr_debug("%s: do not wait for power IRQ that never comes\n",
3059 mmc_hostname(host->mmc));
3060 spin_unlock_irqrestore(&host->lock, flags);
3061 return;
3062 }
3063 }
3064
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303065 if ((req_type & msm_host->curr_pwr_state) ||
3066 (req_type & msm_host->curr_io_level))
3067 done = true;
3068 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05303069
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303070 /*
3071	 * This is needed here to handle a case where IRQ gets
3072 * triggered even before this function is called so that
3073 * x->done counter of completion gets reset. Otherwise,
3074 * next call to wait_for_completion returns immediately
3075 * without actually waiting for the IRQ to be handled.
3076 */
3077 if (done)
3078 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05303079 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
Siba Prasad0196fe42017-06-27 15:13:27 +05303080 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
Ritesh Harjani82124772014-11-04 15:34:00 +05303081 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
3082 mmc_hostname(host->mmc), req_type);
Siba Prasad0196fe42017-06-27 15:13:27 +05303083 MMC_TRACE(host->mmc,
3084 "%s: request(%d) timed out waiting for pwr_irq\n",
3085 __func__, req_type);
3086 sdhci_msm_dump_pwr_ctrl_regs(host);
3087 }
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05303088 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
3089 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05303090}
3091
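/*
 * Enable or disable the Clock Data Recovery circuit in DLL_CONFIG; when
 * disabled, the external CDR input (CORE_CDR_EXT_EN) is selected instead.
 */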
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003092static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
3093{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303094 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3095 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3096 const struct sdhci_msm_offset *msm_host_offset =
3097 msm_host->offset;
3098 u32 config = readl_relaxed(host->ioaddr +
3099 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303100
3101 if (enable) {
3102 config |= CORE_CDR_EN;
3103 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303104 writel_relaxed(config, host->ioaddr +
3105 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303106 } else {
3107 config &= ~CORE_CDR_EN;
3108 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303109 writel_relaxed(config, host->ioaddr +
3110 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303111 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003112}
3113
Asutosh Das648f9d12013-01-10 21:11:04 +05303114static unsigned int sdhci_msm_max_segs(void)
3115{
3116 return SDHCI_MSM_MAX_SEGMENTS;
3117}
3118
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303119static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303120{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303121 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3122 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303123
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303124 return msm_host->pdata->sup_clk_table[0];
3125}
3126
3127static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
3128{
3129 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3130 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3131 int max_clk_index = msm_host->pdata->sup_clk_cnt;
3132
3133 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
3134}
3135
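/*
 * Map a requested rate to the highest supported rate that does not exceed
 * it, clamping to the minimum supported clock. Illustrative example with a
 * hypothetical table {400kHz, 25MHz, 50MHz, 100MHz, 200MHz}: a 150MHz
 * request selects 100MHz.
 */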
3136static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
3137 u32 req_clk)
3138{
3139 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3140 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3141 unsigned int sel_clk = -1;
3142 unsigned char cnt;
3143
3144 if (req_clk < sdhci_msm_get_min_clock(host)) {
3145 sel_clk = sdhci_msm_get_min_clock(host);
3146 return sel_clk;
3147 }
3148
3149 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
3150 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
3151 break;
3152 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
3153 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3154 break;
3155 } else {
3156 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3157 }
3158 }
3159 return sel_clk;
3160}
3161
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303162static long sdhci_msm_get_bus_aggr_clk_rate(struct sdhci_host *host,
3163 u32 apps_clk)
3164{
3165 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3166 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3167 long sel_clk = -1;
3168 unsigned char cnt;
3169
3170 if (msm_host->pdata->bus_clk_cnt != msm_host->pdata->sup_clk_cnt) {
3171 pr_err("%s: %s: mismatch between bus_clk_cnt(%u) and apps_clk_cnt(%u)\n",
3172 mmc_hostname(host->mmc), __func__,
3173 (unsigned int)msm_host->pdata->bus_clk_cnt,
3174 (unsigned int)msm_host->pdata->sup_clk_cnt);
3175 return msm_host->pdata->bus_clk_table[0];
3176 }
3177 if (apps_clk == sdhci_msm_get_min_clock(host)) {
3178 sel_clk = msm_host->pdata->bus_clk_table[0];
3179 return sel_clk;
3180 }
3181
3182 for (cnt = 0; cnt < msm_host->pdata->bus_clk_cnt; cnt++) {
3183 if (msm_host->pdata->sup_clk_table[cnt] > apps_clk)
3184 break;
3185 sel_clk = msm_host->pdata->bus_clk_table[cnt];
3186 }
3187 return sel_clk;
3188}
3189
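/*
 * Snapshot the vendor-specific and standard SDHC registers so that
 * sdhci_msm_registers_restore() can re-program them once the controller
 * clocks are re-enabled; register contents may not survive the intervening
 * power collapse.
 */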
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003190static void sdhci_msm_registers_save(struct sdhci_host *host)
3191{
3192 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3193 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3194 const struct sdhci_msm_offset *msm_host_offset =
3195 msm_host->offset;
3196
3197 if (!msm_host->regs_restore.is_supported)
3198 return;
3199
3200 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
3201 msm_host_offset->CORE_VENDOR_SPEC);
3202 msm_host->regs_restore.vendor_pwrctl_mask =
3203 readl_relaxed(host->ioaddr +
3204 msm_host_offset->CORE_PWRCTL_MASK);
3205 msm_host->regs_restore.vendor_func2 =
3206 readl_relaxed(host->ioaddr +
3207 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3208 msm_host->regs_restore.vendor_func3 =
3209 readl_relaxed(host->ioaddr +
3210 msm_host_offset->CORE_VENDOR_SPEC3);
3211 msm_host->regs_restore.hc_2c_2e =
3212 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
3213 msm_host->regs_restore.hc_3c_3e =
3214 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
3215 msm_host->regs_restore.vendor_pwrctl_ctl =
3216 readl_relaxed(host->ioaddr +
3217 msm_host_offset->CORE_PWRCTL_CTL);
3218 msm_host->regs_restore.hc_38_3a =
3219 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
3220 msm_host->regs_restore.hc_34_36 =
3221 sdhci_readl(host, SDHCI_INT_ENABLE);
3222 msm_host->regs_restore.hc_28_2a =
3223 sdhci_readl(host, SDHCI_HOST_CONTROL);
3224 msm_host->regs_restore.vendor_caps_0 =
3225 readl_relaxed(host->ioaddr +
3226 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3227 msm_host->regs_restore.hc_caps_1 =
3228 sdhci_readl(host, SDHCI_CAPABILITIES_1);
3229 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
3230 msm_host_offset->CORE_TESTBUS_CONFIG);
Bao D. Nguyen2c34e7b2018-12-05 12:52:35 -08003231 msm_host->regs_restore.dll_config = readl_relaxed(host->ioaddr +
3232 msm_host_offset->CORE_DLL_CONFIG);
3233 msm_host->regs_restore.dll_config2 = readl_relaxed(host->ioaddr +
3234 msm_host_offset->CORE_DLL_CONFIG_2);
3239 msm_host->regs_restore.dll_config3 = readl_relaxed(host->ioaddr +
3240 msm_host_offset->CORE_DLL_CONFIG_3);
3241 msm_host->regs_restore.dll_usr_ctl = readl_relaxed(host->ioaddr +
3242 msm_host_offset->CORE_DLL_USR_CTL);
3243
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003244 msm_host->regs_restore.is_valid = true;
3245
3246 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
3247 mmc_hostname(host->mmc), __func__,
3248 readl_relaxed(host->ioaddr +
3249 msm_host_offset->CORE_PWRCTL_MASK));
3250}
3251
3252static void sdhci_msm_registers_restore(struct sdhci_host *host)
3253{
3254 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3255 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003256 u8 irq_status;
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003257 const struct sdhci_msm_offset *msm_host_offset =
3258 msm_host->offset;
Bao D. Nguyen2c34e7b2018-12-05 12:52:35 -08003259 struct mmc_ios ios = host->mmc->ios;
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003260
3261 if (!msm_host->regs_restore.is_supported ||
3262 !msm_host->regs_restore.is_valid)
3263 return;
3264
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003265 writel_relaxed(0, host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003266 writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
3267 msm_host_offset->CORE_VENDOR_SPEC);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003268 writel_relaxed(msm_host->regs_restore.vendor_func2,
3269 host->ioaddr +
3270 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3271 writel_relaxed(msm_host->regs_restore.vendor_func3,
3272 host->ioaddr +
3273 msm_host_offset->CORE_VENDOR_SPEC3);
3274 sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
3275 SDHCI_CLOCK_CONTROL);
3276 sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
3277 SDHCI_AUTO_CMD_ERR);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003278 sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
3279 SDHCI_SIGNAL_ENABLE);
3280 sdhci_writel(host, msm_host->regs_restore.hc_34_36,
3281 SDHCI_INT_ENABLE);
3282 sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
3283 SDHCI_HOST_CONTROL);
3284 writel_relaxed(msm_host->regs_restore.vendor_caps_0,
3285 host->ioaddr +
3286 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3287 sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
3288 SDHCI_CAPABILITIES_1);
3289 writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
3290 msm_host_offset->CORE_TESTBUS_CONFIG);
3291 msm_host->regs_restore.is_valid = false;
3292
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003293 /*
3294 * Clear the PWRCTL_STATUS register.
3295 * There is a rare HW scenario where the first clear pulse could be
3296 * lost when actual reset and clear/read of status register is
3297	 * happening at the same time. Hence, retry at least 10 times to make
3298 * sure status register is cleared. Otherwise, this will result in
3299 * a spurious power IRQ resulting in system instability.
3300 */
3301 irq_status = sdhci_msm_readb_relaxed(host,
3302 msm_host_offset->CORE_PWRCTL_STATUS);
3303
3304 sdhci_msm_clear_pwrctl_status(host, irq_status);
3305
3306 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
3307 host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
3308 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
3309 host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
3310
Bao D. Nguyen2c34e7b2018-12-05 12:52:35 -08003311 if (((ios.timing == MMC_TIMING_MMC_HS400) ||
3312 (ios.timing == MMC_TIMING_MMC_HS200) ||
3313 (ios.timing == MMC_TIMING_UHS_SDR104))
3314 && (ios.clock > CORE_FREQ_100MHZ)) {
3315 writel_relaxed(msm_host->regs_restore.dll_config2,
3316 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
3317 writel_relaxed(msm_host->regs_restore.dll_config3,
3318 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_3);
3319 writel_relaxed(msm_host->regs_restore.dll_usr_ctl,
3320 host->ioaddr + msm_host_offset->CORE_DLL_USR_CTL);
3321 writel_relaxed(msm_host->regs_restore.dll_config &
3322 ~(CORE_DLL_RST | CORE_DLL_PDN),
3323 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
3324
3325 msm_init_cm_dll(host, DLL_INIT_FROM_CX_COLLAPSE_EXIT);
3326 msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
3327 }
3328
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003329 pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
3330 mmc_hostname(host->mmc), __func__,
3331 readl_relaxed(host->ioaddr +
3332 msm_host_offset->CORE_PWRCTL_MASK));
3333}
3334
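/*
 * Enable the controller clocks in order: cast a bus bandwidth vote, then
 * the iface clock (pclk), the bus aggregation clock, the core clock and the
 * ICE clock, and finally restore the saved register state. Errors unwind in
 * reverse order.
 */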
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303335static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
3336{
3337 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3338 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3339 int rc = 0;
3340
3341 if (atomic_read(&msm_host->controller_clock))
3342 return 0;
3343
3344 sdhci_msm_bus_voting(host, 1);
3345
3346 if (!IS_ERR(msm_host->pclk)) {
3347 rc = clk_prepare_enable(msm_host->pclk);
3348 if (rc) {
3349 pr_err("%s: %s: failed to enable the pclk with error %d\n",
3350 mmc_hostname(host->mmc), __func__, rc);
3351 goto remove_vote;
3352 }
3353 }
3354
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303355 if (!IS_ERR(msm_host->bus_aggr_clk)) {
3356 rc = clk_prepare_enable(msm_host->bus_aggr_clk);
3357 if (rc) {
3358 pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
3359 mmc_hostname(host->mmc), __func__, rc);
3360 goto disable_pclk;
3361 }
3362 }
3363
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303364 rc = clk_prepare_enable(msm_host->clk);
3365 if (rc) {
3366 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
3367 mmc_hostname(host->mmc), __func__, rc);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303368 goto disable_bus_aggr_clk;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303369 }
3370
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303371 if (!IS_ERR(msm_host->ice_clk)) {
3372 rc = clk_prepare_enable(msm_host->ice_clk);
3373 if (rc) {
3374 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
3375 mmc_hostname(host->mmc), __func__, rc);
3376 goto disable_host_clk;
3377 }
3378 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303379 atomic_set(&msm_host->controller_clock, 1);
3380 pr_debug("%s: %s: enabled controller clock\n",
3381 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003382 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303383 goto out;
3384
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303385disable_host_clk:
3386 if (!IS_ERR(msm_host->clk))
3387 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303388disable_bus_aggr_clk:
3389 if (!IS_ERR(msm_host->bus_aggr_clk))
3390 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303391disable_pclk:
3392 if (!IS_ERR(msm_host->pclk))
3393 clk_disable_unprepare(msm_host->pclk);
3394remove_vote:
3395 if (msm_host->msm_bus_vote.client_handle)
3396 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3397out:
3398 return rc;
3399}
3400
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303401static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
3402{
3403 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3404 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303405
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303406 if (atomic_read(&msm_host->controller_clock)) {
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003407 sdhci_msm_registers_save(host);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303408 if (!IS_ERR(msm_host->clk))
3409 clk_disable_unprepare(msm_host->clk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303410 if (!IS_ERR(msm_host->ice_clk))
3411 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303412 if (!IS_ERR(msm_host->bus_aggr_clk))
3413 clk_disable_unprepare(msm_host->bus_aggr_clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303414 if (!IS_ERR(msm_host->pclk))
3415 clk_disable_unprepare(msm_host->pclk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303416 sdhci_msm_bus_voting(host, 0);
3417 atomic_set(&msm_host->controller_clock, 0);
3418 pr_debug("%s: %s: disabled controller clock\n",
3419 mmc_hostname(host->mmc), __func__);
3420 }
3421}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303422
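/*
 * Enable or disable the full clock set: the controller clocks (via the
 * helpers above) plus the optional bus, ff and sleep clocks. On the disable
 * path SDCLK is gated first, and the clocks are left running while a signal
 * voltage switch is in progress (card_clock_off).
 */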
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303423static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
3424{
3425 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3426 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3427 int rc = 0;
3428
3429 if (enable && !atomic_read(&msm_host->clks_on)) {
3430 pr_debug("%s: request to enable clocks\n",
3431 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303432
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303433 /*
3434 * The bus-width or the clock rate might have changed
3435		 * after controller clocks are enabled, update the bus vote
3436		 * in such a case.
3437 */
3438 if (atomic_read(&msm_host->controller_clock))
3439 sdhci_msm_bus_voting(host, 1);
3440
3441 rc = sdhci_msm_enable_controller_clock(host);
3442 if (rc)
3443 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303444
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303445 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3446 rc = clk_prepare_enable(msm_host->bus_clk);
3447 if (rc) {
3448 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
3449 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303450 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303451 }
3452 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003453 if (!IS_ERR(msm_host->ff_clk)) {
3454 rc = clk_prepare_enable(msm_host->ff_clk);
3455 if (rc) {
3456 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
3457 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303458 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003459 }
3460 }
3461 if (!IS_ERR(msm_host->sleep_clk)) {
3462 rc = clk_prepare_enable(msm_host->sleep_clk);
3463 if (rc) {
3464 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
3465 mmc_hostname(host->mmc), __func__, rc);
3466 goto disable_ff_clk;
3467 }
3468 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303469 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303470
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303471 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303472 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
3473 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05303474 /*
3475 * During 1.8V signal switching the clock source must
3476 * still be ON as it requires accessing SDHC
3477 * registers (SDHCi host control2 register bit 3 must
3478 * be written and polled after stopping the SDCLK).
3479 */
3480 if (host->mmc->card_clock_off)
3481 return 0;
3482 pr_debug("%s: request to disable clocks\n",
3483 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003484 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
3485 clk_disable_unprepare(msm_host->sleep_clk);
3486 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3487 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303488 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3489 clk_disable_unprepare(msm_host->bus_clk);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003490 sdhci_msm_disable_controller_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303491 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303492 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303493 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003494disable_ff_clk:
3495 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3496 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303497disable_bus_clk:
3498 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3499 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303500disable_controller_clk:
3501 if (!IS_ERR_OR_NULL(msm_host->clk))
3502 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303503 if (!IS_ERR(msm_host->ice_clk))
3504 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303505 if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
3506 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303507 if (!IS_ERR_OR_NULL(msm_host->pclk))
3508 clk_disable_unprepare(msm_host->pclk);
3509 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303510remove_vote:
3511 if (msm_host->msm_bus_vote.client_handle)
3512 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303513out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303514 return rc;
3515}
3516
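/*
 * Platform clock handling: gate/ungate the clocks, manage the CLK_PWRSAVE
 * auto-gating bit, double the source rate for DDR/HS400 timings, select the
 * MCLK source (divided MCLK plus HC_SELECT_IN for HS400, free running MCLK
 * otherwise), then program the rate, bus-aggr clock, pinctrl drive type and
 * bus vote before handing over to sdhci_set_clock().
 */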
3517static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3518{
3519 int rc;
3520 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3521 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303522 const struct sdhci_msm_offset *msm_host_offset =
3523 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003524 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303525 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003526 u32 sup_clock, ddr_clock, dll_lock;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303527 long bus_clk_rate;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303528 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303529
3530 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303531 /*
3532 * disable pwrsave to ensure clock is not auto-gated until
3533 * the rate is >400KHz (initialization complete).
3534 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303535 writel_relaxed(readl_relaxed(host->ioaddr +
3536 msm_host_offset->CORE_VENDOR_SPEC) &
3537 ~CORE_CLK_PWRSAVE, host->ioaddr +
3538 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303539 sdhci_msm_prepare_clocks(host, false);
3540 host->clock = clock;
3541 goto out;
3542 }
3543
3544 rc = sdhci_msm_prepare_clocks(host, true);
3545 if (rc)
3546 goto out;
3547
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303548 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3549 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303550 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003551 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303552 writel_relaxed(readl_relaxed(host->ioaddr +
3553 msm_host_offset->CORE_VENDOR_SPEC)
3554 | CORE_CLK_PWRSAVE, host->ioaddr +
3555 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303556 /*
3557	 * Disable pwrsave for a newly added card if it doesn't allow clock
3558 * gating.
3559 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003560 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303561 writel_relaxed(readl_relaxed(host->ioaddr +
3562 msm_host_offset->CORE_VENDOR_SPEC)
3563 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3564 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303565
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303566 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003567 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003568 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003569 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303570 /*
3571 * The SDHC requires internal clock frequency to be double the
3572 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003573 * uses the faster clock(100/400MHz) for some of its parts and
3574 * send the actual required clock (50/200MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303575 */
3576 ddr_clock = clock * 2;
3577 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3578 ddr_clock);
3579 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003580
3581 /*
3582 * In general all timing modes are controlled via UHS mode select in
3583	 * Host Control2 register. eMMC-specific HS200/HS400 don't have
3584 * their respective modes defined here, hence we use these values.
3585 *
3586 * HS200 - SDR104 (Since they both are equivalent in functionality)
3587 * HS400 - This involves multiple configurations
3588 * Initially SDR104 - when tuning is required as HS200
3589 * Then when switching to DDR @ 400MHz (HS400) we use
3590 * the vendor specific HC_SELECT_IN to control the mode.
3591 *
3592 * In addition to controlling the modes we also need to select the
3593 * correct input clock for DLL depending on the mode.
3594 *
3595 * HS400 - divided clock (free running MCLK/2)
3596 * All other modes - default (free running MCLK)
3597 */
3598 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3599 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303600 writel_relaxed(((readl_relaxed(host->ioaddr +
3601 msm_host_offset->CORE_VENDOR_SPEC)
3602 & ~CORE_HC_MCLK_SEL_MASK)
3603 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3604 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003605 /*
3606 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3607 * register
3608 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303609 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003610 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303611 msm_host->enhanced_strobe)) &&
3612 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003613 /*
3614 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3615 * field in VENDOR_SPEC_FUNC
3616 */
3617 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303618 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003619 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303620 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3621 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003622 }
Ram Prakash Gupta20b8ca12018-04-16 11:17:22 +05303623 /*
3624		 * After MCLK ungating, toggle the FIFO write clock to get
3625 * the FIFO pointers and flags to valid state.
3626 */
3627 if (msm_host->tuning_done ||
3628 (card && mmc_card_strobe(card) &&
3629 msm_host->enhanced_strobe)) {
3630 /*
3631 * set HC_REG_DLL_CONFIG_3[1] to select MCLK as
3632 * DLL input clock
3633 */
3634 writel_relaxed(((readl_relaxed(host->ioaddr +
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003635 msm_host_offset->CORE_DLL_CONFIG_3))
Ram Prakash Gupta20b8ca12018-04-16 11:17:22 +05303636 | RCLK_TOGGLE), host->ioaddr +
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003637 msm_host_offset->CORE_DLL_CONFIG_3);
Ram Prakash Gupta20b8ca12018-04-16 11:17:22 +05303638			/* ensure the above write completes before toggling the same bit */
3639 wmb();
3640 udelay(2);
3641 /*
3642 * clear HC_REG_DLL_CONFIG_3[1] to select RCLK as
3643 * DLL input clock
3644 */
3645 writel_relaxed(((readl_relaxed(host->ioaddr +
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003646 msm_host_offset->CORE_DLL_CONFIG_3))
Ram Prakash Gupta20b8ca12018-04-16 11:17:22 +05303647 & ~RCLK_TOGGLE), host->ioaddr +
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003648 msm_host_offset->CORE_DLL_CONFIG_3);
Ram Prakash Gupta20b8ca12018-04-16 11:17:22 +05303649 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003650 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3651 /*
3652 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3653 * CORE_DLL_STATUS to be set. This should get set
3654 * with in 15 us at 200 MHz.
3655 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303656 rc = readl_poll_timeout(host->ioaddr +
3657 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003658 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3659 CORE_DDR_DLL_LOCK)), 10, 1000);
3660 if (rc == -ETIMEDOUT)
3661 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3662 mmc_hostname(host->mmc),
3663 dll_lock);
3664 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003665 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003666 if (!msm_host->use_cdclp533)
3667			/* clear CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3668 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303669 msm_host_offset->CORE_VENDOR_SPEC3)
3670 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3671 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003672
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003673 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303674 writel_relaxed(((readl_relaxed(host->ioaddr +
3675 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003676 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303677 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3678 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003679
3680 /*
3681 * Disable HC_SELECT_IN to be able to use the UHS mode select
3682 * configuration from Host Control2 register for all other
3683 * modes.
3684 *
3685 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3686 * in VENDOR_SPEC_FUNC
3687 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303688 writel_relaxed((readl_relaxed(host->ioaddr +
3689 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003690 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303691 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3692 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003693 }
3694 mb();
3695
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303696 if (sup_clock != msm_host->clk_rate) {
3697 pr_debug("%s: %s: setting clk rate to %u\n",
3698 mmc_hostname(host->mmc), __func__, sup_clock);
3699 rc = clk_set_rate(msm_host->clk, sup_clock);
3700 if (rc) {
3701 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3702 mmc_hostname(host->mmc), __func__,
3703 sup_clock, rc);
3704 goto out;
3705 }
3706 msm_host->clk_rate = sup_clock;
3707 host->clock = clock;
Can Guob903ad82017-10-17 13:22:53 +08003708
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303709 if (!IS_ERR(msm_host->bus_aggr_clk) &&
3710 msm_host->pdata->bus_clk_cnt) {
3711 bus_clk_rate = sdhci_msm_get_bus_aggr_clk_rate(host,
3712 sup_clock);
3713 if (bus_clk_rate >= 0) {
3714 rc = clk_set_rate(msm_host->bus_aggr_clk,
3715 bus_clk_rate);
3716 if (rc) {
3717 pr_err("%s: %s: Failed to set rate %ld for bus-aggr-clk : %d\n",
3718 mmc_hostname(host->mmc),
3719 __func__, bus_clk_rate, rc);
3720 goto out;
3721 }
3722 } else {
3723 pr_err("%s: %s: Unsupported apps clk rate %u for bus-aggr-clk, err: %ld\n",
3724 mmc_hostname(host->mmc), __func__,
3725 sup_clock, bus_clk_rate);
3726 }
3727 }
3728
Can Guob903ad82017-10-17 13:22:53 +08003729 /* Configure pinctrl drive type according to
3730 * current clock rate
3731 */
3732 rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
3733 if (rc)
3734 pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
3735 mmc_hostname(host->mmc), __func__,
3736 clock, rc);
3737
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303738 /*
3739 * Update the bus vote in case of frequency change due to
3740 * clock scaling.
3741 */
3742 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303743 }
3744out:
3745 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303746}
3747
Sahitya Tummala14613432013-03-21 11:13:25 +05303748static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3749 unsigned int uhs)
3750{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003751 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3752 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303753 const struct sdhci_msm_offset *msm_host_offset =
3754 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303755 u16 ctrl_2;
3756
3757 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3758 /* Select Bus Speed Mode for host */
3759 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003760 if ((uhs == MMC_TIMING_MMC_HS400) ||
3761 (uhs == MMC_TIMING_MMC_HS200) ||
3762 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303763 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3764 else if (uhs == MMC_TIMING_UHS_SDR12)
3765 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3766 else if (uhs == MMC_TIMING_UHS_SDR25)
3767 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3768 else if (uhs == MMC_TIMING_UHS_SDR50)
3769 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003770 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3771 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303772 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303773 /*
3774	 * When clock frequency is less than 100MHz, the feedback clock must be
3775 * provided and DLL must not be used so that tuning can be skipped. To
3776 * provide feedback clock, the mode selection can be any value less
3777 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3778 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003779 if (host->clock <= CORE_FREQ_100MHZ) {
3780 if ((uhs == MMC_TIMING_MMC_HS400) ||
3781 (uhs == MMC_TIMING_MMC_HS200) ||
3782 (uhs == MMC_TIMING_UHS_SDR104))
3783 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303784
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003785 /*
3786 * Make sure DLL is disabled when not required
3787 *
3788 * Write 1 to DLL_RST bit of DLL_CONFIG register
3789 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303790 writel_relaxed((readl_relaxed(host->ioaddr +
3791 msm_host_offset->CORE_DLL_CONFIG)
3792 | CORE_DLL_RST), host->ioaddr +
3793 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003794
3795 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303796 writel_relaxed((readl_relaxed(host->ioaddr +
3797 msm_host_offset->CORE_DLL_CONFIG)
3798 | CORE_DLL_PDN), host->ioaddr +
3799 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003800 mb();
3801
3802 /*
3803 * The DLL needs to be restored and CDCLP533 recalibrated
3804 * when the clock frequency is set back to 400MHz.
3805 */
3806 msm_host->calibration_done = false;
3807 }
3808
3809 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3810 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303811 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3812
3813}
3814
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003815#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003816#define DRV_NAME "cmdq-host"
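/*
 * Dump the 16-word CMDQ debug RAM; the register offsets shift by 0x48 on
 * cores older than 4.2.0 and by CQ_V5_VENDOR_CFG when the CQ host reports
 * offset_changed.
 */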
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303817static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003818{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303819 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303820 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3821 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303822 const struct sdhci_msm_offset *msm_host_offset =
3823 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303824 struct cmdq_host *cq_host = host->cq_host;
3825
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303826 u32 version = sdhci_msm_readl_relaxed(host,
3827 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003828 u16 minor = version & CORE_VERSION_TARGET_MASK;
3829 /* registers offset changed starting from 4.2.0 */
3830 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3831
Sayali Lokhande6e7e6d52017-01-04 12:00:35 +05303832 if (cq_host->offset_changed)
3833 offset += CQ_V5_VENDOR_CFG;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003834 pr_err("---- Debug RAM dump ----\n");
3835 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3836 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3837 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3838
3839 while (i < 16) {
3840 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3841 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3842 i++;
3843 }
3844 pr_err("-------------------------\n");
3845}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303846
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303847static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3848{
3849 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3850 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3851 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3852
3853 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3854 sizeof(struct mmc_host));
3855 if (msm_host->mmc->card)
3856 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3857 sizeof(struct mmc_card));
3858 memcpy(&cached_data->copy_host, host,
3859 sizeof(struct sdhci_host));
3860}
3861
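/*
 * Cache a copy of the host/card state, then dump the vendor-specific
 * registers, DLL state and the SDCC test bus (walked via tbsel/tbsel2
 * below); ICE status is printed as well when inline crypto is enabled.
 */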
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303862void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3863{
3864 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3865 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303866 const struct sdhci_msm_offset *msm_host_offset =
3867 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303868 int tbsel, tbsel2;
3869 int i, index = 0;
3870 u32 test_bus_val = 0;
3871 u32 debug_reg[MAX_TEST_BUS] = {0};
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303872 u32 sts = 0;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303873
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303874 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303875 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003876 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303877 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003878
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303879 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3880 sdhci_msm_readl_relaxed(host,
3881 msm_host_offset->CORE_MCI_DATA_CNT),
3882 sdhci_msm_readl_relaxed(host,
3883 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303884 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303885 sdhci_msm_readl_relaxed(host,
3886 msm_host_offset->CORE_MCI_DATA_CNT),
3887 sdhci_msm_readl_relaxed(host,
3888 msm_host_offset->CORE_MCI_FIFO_CNT),
3889 sdhci_msm_readl_relaxed(host,
3890 msm_host_offset->CORE_MCI_STATUS));
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003891 pr_info("DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303892 readl_relaxed(host->ioaddr +
3893 msm_host_offset->CORE_DLL_STATUS),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303894 readl_relaxed(host->ioaddr +
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003895 msm_host_offset->CORE_DLL_CONFIG),
3896 sdhci_msm_readl_relaxed(host,
3897 msm_host_offset->CORE_DLL_CONFIG_2));
3898 pr_info("DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
3899 readl_relaxed(host->ioaddr +
3900 msm_host_offset->CORE_DLL_CONFIG_3),
3901 readl_relaxed(host->ioaddr +
3902 msm_host_offset->CORE_DLL_USR_CTL),
3903 sdhci_msm_readl_relaxed(host,
3904 msm_host_offset->CORE_DDR_CONFIG));
3905 pr_info("SDCC ver: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
3906 readl_relaxed(host->ioaddr +
3907 msm_host_offset->CORE_MCI_VERSION),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303908 readl_relaxed(host->ioaddr +
3909 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3910 readl_relaxed(host->ioaddr +
3911 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003912 pr_info("Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303913 readl_relaxed(host->ioaddr +
Veerabhadrarao Badiganti51b62ae2018-06-20 20:08:42 -07003914 msm_host_offset->CORE_VENDOR_SPEC),
3915 readl_relaxed(host->ioaddr +
3916 msm_host_offset->CORE_VENDOR_SPEC_FUNC2),
3917 readl_relaxed(host->ioaddr +
3918 msm_host_offset->CORE_VENDOR_SPEC3));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303919 /*
3920 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3921 * of CORE_TESTBUS_CONFIG register.
3922 *
3923 * To select test bus 0 to 7 use tbsel and to select any test bus
3924 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
3925 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3926 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3927 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003928 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303929 for (tbsel = 0; tbsel < 8; tbsel++) {
3930 if (index >= MAX_TEST_BUS)
3931 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303932 test_bus_val =
3933 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3934 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3935 sdhci_msm_writel_relaxed(test_bus_val, host,
3936 msm_host_offset->CORE_TESTBUS_CONFIG);
3937 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3938 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303939 }
3940 }
3941 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3942 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3943 i, i + 3, debug_reg[i], debug_reg[i+1],
3944 debug_reg[i+2], debug_reg[i+3]);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303945 if (host->is_crypto_en) {
3946 sdhci_msm_ice_get_status(host, &sts);
3947 pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
Venkat Gopalakrishnan6324ee62015-10-22 17:53:30 -07003948 sdhci_msm_ice_print_regs(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303949 }
3950}
3951
3952static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3953{
3954 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3955 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3956
3957 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303958 if (msm_host->ice.pdev) {
3959 if (msm_host->ice_hci_support)
3960 writel_relaxed(1, host->ioaddr +
3961 HC_VENDOR_SPECIFIC_ICE_CTRL);
3962 else
3963 writel_relaxed(1,
3964 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3965 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303966
3967 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003968}
3969
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303970/*
3971 * sdhci_msm_enhanced_strobe_mask :-
3972 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3973 * SW should write 3 to
3974 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3975 * The default reset value of this register is 2.
3976 */
3977static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3978{
3979 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3980 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303981 const struct sdhci_msm_offset *msm_host_offset =
3982 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303983
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303984 if (!msm_host->enhanced_strobe ||
3985 !mmc_card_strobe(msm_host->mmc->card)) {
3986 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303987 mmc_hostname(host->mmc));
3988 return;
3989 }
3990
3991 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303992 writel_relaxed((readl_relaxed(host->ioaddr +
3993 msm_host_offset->CORE_VENDOR_SPEC3)
3994 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3995 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303996 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303997 writel_relaxed((readl_relaxed(host->ioaddr +
3998 msm_host_offset->CORE_VENDOR_SPEC3)
3999 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
4000 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05304001 }
4002}
4003
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004004static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
4005{
4006 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4007 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304008 const struct sdhci_msm_offset *msm_host_offset =
4009 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004010
4011 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304012 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
4013 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004014 } else {
4015 u32 value;
4016
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304017 value = sdhci_msm_readl_relaxed(host,
4018 msm_host_offset->CORE_TESTBUS_CONFIG);
4019 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
4020 sdhci_msm_writel_relaxed(value, host,
4021 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004022 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05304023}
4024
Pavan Anamula691dd592015-08-25 16:11:20 +05304025void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
4026{
4027 u32 vendor_func2;
4028 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304029 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4030 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4031 const struct sdhci_msm_offset *msm_host_offset =
4032 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05304033
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304034 vendor_func2 = readl_relaxed(host->ioaddr +
4035 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05304036
4037 if (enable) {
4038 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304039 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05304040 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304041 while (readl_relaxed(host->ioaddr +
4042 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304043 if (timeout == 0) {
4044 pr_info("%s: Applying wait idle disable workaround\n",
4045 mmc_hostname(host->mmc));
4046 /*
4047 * Apply the reset workaround to not wait for
4048 * pending data transfers on AXI before
4049 * resetting the controller. This could be
4050 * risky if the transfers were stuck on the
4051 * AXI bus.
4052 */
4053 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304054 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05304055 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304056 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
4057 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05304058 host->reset_wa_t = ktime_get();
4059 return;
4060 }
4061 timeout--;
4062 udelay(10);
4063 }
4064 pr_info("%s: waiting for SW_RST_REQ is successful\n",
4065 mmc_hostname(host->mmc));
4066 } else {
4067 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304068 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05304069 }
4070}
4071
Gilad Broner44445992015-09-29 16:05:39 +03004072static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
4073{
4074 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05304075 container_of(work, struct sdhci_msm_pm_qos_irq,
4076 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03004077
4078 if (atomic_read(&pm_qos_irq->counter))
4079 return;
4080
4081 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
4082 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
4083}
4084
4085void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
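/*
 * Reference-counted PM QoS voting for the controller IRQ: vote applies the
 * latency for the current power policy, unvote drops the request either
 * immediately or through the delayed unvote work when called async.
 */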
4086{
4087 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4088 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4089 struct sdhci_msm_pm_qos_latency *latency =
4090 &msm_host->pdata->pm_qos_data.irq_latency;
4091 int counter;
4092
4093 if (!msm_host->pm_qos_irq.enabled)
4094 return;
4095
4096 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
4097 /* Make sure to update the voting in case power policy has changed */
4098 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
4099 && counter > 1)
4100 return;
4101
Asutosh Das36c2e922015-12-01 12:19:58 +05304102 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03004103 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
4104 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4105 msm_host->pm_qos_irq.latency);
4106}
4107
4108void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
4109{
4110 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4111 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4112 int counter;
4113
4114 if (!msm_host->pm_qos_irq.enabled)
4115 return;
4116
Subhash Jadavani4d813902015-10-15 12:16:43 -07004117 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
4118 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
4119 } else {
4120 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
4121 return;
Gilad Broner44445992015-09-29 16:05:39 +03004122 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07004123
Gilad Broner44445992015-09-29 16:05:39 +03004124 if (counter)
4125 return;
4126
4127 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05304128 queue_delayed_work(msm_host->pm_qos_wq,
4129 &msm_host->pm_qos_irq.unvote_work,
4130 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03004131 return;
4132 }
4133
4134 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
4135 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4136 msm_host->pm_qos_irq.latency);
4137}
4138
Gilad Broner68c54562015-09-20 11:59:46 +03004139static ssize_t
4140sdhci_msm_pm_qos_irq_show(struct device *dev,
4141 struct device_attribute *attr, char *buf)
4142{
4143 struct sdhci_host *host = dev_get_drvdata(dev);
4144 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4145 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4146 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
4147
4148 return snprintf(buf, PAGE_SIZE,
4149 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
4150 irq->enabled, atomic_read(&irq->counter), irq->latency);
4151}
4152
4153static ssize_t
4154sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
4155 struct device_attribute *attr, char *buf)
4156{
4157 struct sdhci_host *host = dev_get_drvdata(dev);
4158 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4159 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4160
4161 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
4162}
4163
4164static ssize_t
4165sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
4166 struct device_attribute *attr, const char *buf, size_t count)
4167{
4168 struct sdhci_host *host = dev_get_drvdata(dev);
4169 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4170 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4171 uint32_t value;
4172 bool enable;
4173 int ret;
4174
4175 ret = kstrtou32(buf, 0, &value);
4176 if (ret)
4177 goto out;
4178 enable = !!value;
4179
4180 if (enable == msm_host->pm_qos_irq.enabled)
4181 goto out;
4182
4183 msm_host->pm_qos_irq.enabled = enable;
4184 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304185 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004186 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4187 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
4188 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4189 msm_host->pm_qos_irq.latency);
4190 }
4191
4192out:
4193 return count;
4194}
4195
Krishna Kondaf85e31a2015-10-23 11:43:02 -07004196#ifdef CONFIG_SMP
4197static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
4198 struct sdhci_host *host)
4199{
4200 msm_host->pm_qos_irq.req.irq = host->irq;
4201}
4202#else
4203static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
4204 struct sdhci_host *host) { }
4205#endif
4206
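/*
 * Create the dedicated workqueue used to run the delayed PM QoS unvote
 * work (see the WQ_MEM_RECLAIM note below).
 */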
Vijay Viswanath1971d222018-03-01 12:01:47 +05304207static bool sdhci_msm_pm_qos_wq_init(struct sdhci_msm_host *msm_host)
4208{
4209 char *wq = NULL;
4210 bool ret = true;
4211
4212 wq = kasprintf(GFP_KERNEL, "sdhci_msm_pm_qos/%s",
4213 dev_name(&msm_host->pdev->dev));
4214 if (!wq)
4215 return false;
4216 /*
4217 * Create a work queue with flag WQ_MEM_RECLAIM set for
4218	 * pm_qos_unvote work. Because the mmc thread is created with
4219	 * flag PF_MEMALLOC set, the kernel will check for the work
4220	 * queue flag WQ_MEM_RECLAIM when flushing the work queue. If
4221	 * the WQ_MEM_RECLAIM flag is not set, a kernel warning
4222	 * will be triggered.

4223 */
4224 msm_host->pm_qos_wq = create_workqueue(wq);
4225 if (!msm_host->pm_qos_wq) {
4226 ret = false;
4227 dev_err(&msm_host->pdev->dev,
4228 "failed to create pm qos unvote work queue\n");
4229 }
4230 kfree(wq);
4231 return ret;
4232}
4233
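/*
 * Set up the IRQ PM QoS request, its delayed unvote work, the unvote
 * workqueue and the related sysfs attributes. Called per partition but
 * only initializes once.
 */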
Gilad Broner44445992015-09-29 16:05:39 +03004234void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
4235{
4236 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4237 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4238 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03004239 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004240
4241 if (!msm_host->pdata->pm_qos_data.irq_valid)
4242 return;
4243
4244 /* Initialize only once as this gets called per partition */
4245 if (msm_host->pm_qos_irq.enabled)
4246 return;
4247
4248 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4249 msm_host->pm_qos_irq.req.type =
4250 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07004251 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
4252 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
4253 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03004254 else
4255 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
4256 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
4257
Vijay Viswanath1971d222018-03-01 12:01:47 +05304258 sdhci_msm_pm_qos_wq_init(msm_host);
4259
Asutosh Das36c2e922015-12-01 12:19:58 +05304260 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004261 sdhci_msm_pm_qos_irq_unvote_work);
4262 /* For initialization phase, set the performance latency */
4263 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
4264 msm_host->pm_qos_irq.latency =
4265 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
4266 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
4267 msm_host->pm_qos_irq.latency);
4268 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004269
4270 /* sysfs */
4271 msm_host->pm_qos_irq.enable_attr.show =
4272 sdhci_msm_pm_qos_irq_enable_show;
4273 msm_host->pm_qos_irq.enable_attr.store =
4274 sdhci_msm_pm_qos_irq_enable_store;
4275 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
4276 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
4277 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
4278 ret = device_create_file(&msm_host->pdev->dev,
4279 &msm_host->pm_qos_irq.enable_attr);
4280 if (ret)
4281 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
4282 __func__, ret);
4283
4284 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
4285 msm_host->pm_qos_irq.status_attr.store = NULL;
4286 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
4287 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
4288 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
4289 ret = device_create_file(&msm_host->pdev->dev,
4290 &msm_host->pm_qos_irq.status_attr);
4291 if (ret)
4292 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
4293 __func__, ret);
4294}
4295
4296static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
4297 struct device_attribute *attr, char *buf)
4298{
4299 struct sdhci_host *host = dev_get_drvdata(dev);
4300 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4301 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4302 struct sdhci_msm_pm_qos_group *group;
4303 int i;
4304 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4305 int offset = 0;
4306
4307 for (i = 0; i < nr_groups; i++) {
4308 group = &msm_host->pm_qos[i];
4309		offset += snprintf(&buf[offset], PAGE_SIZE - offset,
4310 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
4311 i, group->req.cpus_affine.bits[0],
4312 msm_host->pm_qos_group_enable,
4313 atomic_read(&group->counter),
4314 group->latency);
4315 }
4316
4317 return offset;
4318}
4319
4320static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
4321 struct device_attribute *attr, char *buf)
4322{
4323 struct sdhci_host *host = dev_get_drvdata(dev);
4324 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4325 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4326
4327 return snprintf(buf, PAGE_SIZE, "%s\n",
4328 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
4329}
4330
4331static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
4332 struct device_attribute *attr, const char *buf, size_t count)
4333{
4334 struct sdhci_host *host = dev_get_drvdata(dev);
4335 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4336 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4337 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4338 uint32_t value;
4339 bool enable;
4340 int ret;
4341 int i;
4342
4343 ret = kstrtou32(buf, 0, &value);
4344 if (ret)
4345 goto out;
4346 enable = !!value;
4347
4348 if (enable == msm_host->pm_qos_group_enable)
4349 goto out;
4350
4351 msm_host->pm_qos_group_enable = enable;
4352 if (!enable) {
4353 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304354 cancel_delayed_work_sync(
4355 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004356 atomic_set(&msm_host->pm_qos[i].counter, 0);
4357 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
4358 pm_qos_update_request(&msm_host->pm_qos[i].req,
4359 msm_host->pm_qos[i].latency);
4360 }
4361 }
4362
4363out:
4364 return count;
Gilad Broner44445992015-09-29 16:05:39 +03004365}
4366
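/* Map a CPU number to its PM QoS group index, or -EINVAL if unmapped. */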
4367static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
4368{
4369 int i;
4370 struct sdhci_msm_cpu_group_map *map =
4371 &msm_host->pdata->pm_qos_data.cpu_group_map;
4372
4373 if (cpu < 0)
4374 goto not_found;
4375
4376 for (i = 0; i < map->nr_groups; i++)
4377 if (cpumask_test_cpu(cpu, &map->mask[i]))
4378 return i;
4379
4380not_found:
4381 return -EINVAL;
4382}
4383
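/*
 * Take a PM QoS vote for the CPU group that the given CPU belongs to,
 * using the latency table entry for the current power policy.
 */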
4384void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
4385 struct sdhci_msm_pm_qos_latency *latency, int cpu)
4386{
4387 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4388 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4389 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4390 struct sdhci_msm_pm_qos_group *pm_qos_group;
4391 int counter;
4392
4393 if (!msm_host->pm_qos_group_enable || group < 0)
4394 return;
4395
4396 pm_qos_group = &msm_host->pm_qos[group];
4397 counter = atomic_inc_return(&pm_qos_group->counter);
4398
4399 /* Make sure to update the voting in case power policy has changed */
4400 if (pm_qos_group->latency == latency->latency[host->power_policy]
4401 && counter > 1)
4402 return;
4403
Asutosh Das36c2e922015-12-01 12:19:58 +05304404 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03004405
4406 pm_qos_group->latency = latency->latency[host->power_policy];
4407 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
4408}
4409
4410static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
4411{
4412 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05304413 container_of(work, struct sdhci_msm_pm_qos_group,
4414 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03004415
4416 if (atomic_read(&group->counter))
4417 return;
4418
4419 group->latency = PM_QOS_DEFAULT_VALUE;
4420 pm_qos_update_request(&group->req, group->latency);
4421}
4422
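/*
 * Drop a PM QoS vote for the CPU group of the given CPU. Returns true
 * when the group vote is being removed (immediately or asynchronously),
 * false otherwise.
 */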
Gilad Broner07d92eb2015-09-29 16:57:21 +03004423bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03004424{
4425 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4426 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4427 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4428
4429 if (!msm_host->pm_qos_group_enable || group < 0 ||
4430 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03004431 return false;
Gilad Broner44445992015-09-29 16:05:39 +03004432
4433 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05304434 queue_delayed_work(msm_host->pm_qos_wq,
4435 &msm_host->pm_qos[group].unvote_work,
4436 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03004437 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004438 }
4439
4440 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
4441 pm_qos_update_request(&msm_host->pm_qos[group].req,
4442 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03004443 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004444}
4445
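/*
 * Allocate and register one PM QoS request per CPU group described in
 * the platform data and expose the group status/enable sysfs attributes.
 */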
4446void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
4447 struct sdhci_msm_pm_qos_latency *latency)
4448{
4449 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4450 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4451 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4452 struct sdhci_msm_pm_qos_group *group;
4453 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03004454 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004455
4456 if (msm_host->pm_qos_group_enable)
4457 return;
4458
4459 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
4460 GFP_KERNEL);
4461 if (!msm_host->pm_qos)
4462 return;
4463
4464 for (i = 0; i < nr_groups; i++) {
4465 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05304466 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004467 sdhci_msm_pm_qos_cpu_unvote_work);
4468 atomic_set(&group->counter, 0);
4469 group->req.type = PM_QOS_REQ_AFFINE_CORES;
4470 cpumask_copy(&group->req.cpus_affine,
4471 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
Ritesh Harjanib41e0572017-03-28 13:19:26 +05304472		/* We set the default latency here for all pm_qos cpu groups. */
4473 group->latency = PM_QOS_DEFAULT_VALUE;
Gilad Broner44445992015-09-29 16:05:39 +03004474 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
4475 group->latency);
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304476 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
Gilad Broner44445992015-09-29 16:05:39 +03004477 __func__, i,
4478 group->req.cpus_affine.bits[0],
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304479 group->latency);
Gilad Broner44445992015-09-29 16:05:39 +03004480 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03004481 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03004482 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004483
4484 /* sysfs */
4485 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
4486 msm_host->pm_qos_group_status_attr.store = NULL;
4487 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
4488 msm_host->pm_qos_group_status_attr.attr.name =
4489 "pm_qos_cpu_groups_status";
4490 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
4491 ret = device_create_file(&msm_host->pdev->dev,
4492 &msm_host->pm_qos_group_status_attr);
4493 if (ret)
4494 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
4495 __func__, ret);
4496 msm_host->pm_qos_group_enable_attr.show =
4497 sdhci_msm_pm_qos_group_enable_show;
4498 msm_host->pm_qos_group_enable_attr.store =
4499 sdhci_msm_pm_qos_group_enable_store;
4500 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
4501 msm_host->pm_qos_group_enable_attr.attr.name =
4502 "pm_qos_cpu_groups_enable";
4503 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
4504 ret = device_create_file(&msm_host->pdev->dev,
4505 &msm_host->pm_qos_group_enable_attr);
4506 if (ret)
4507 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
4508 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03004509}
4510
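/*
 * Called before issuing a request: vote for IRQ PM QoS and for the CPU
 * group of the issuing CPU, dropping the vote of the previous group if
 * the request moved to a different group.
 */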
Gilad Broner07d92eb2015-09-29 16:57:21 +03004511static void sdhci_msm_pre_req(struct sdhci_host *host,
4512 struct mmc_request *mmc_req)
4513{
4514 int cpu;
4515 int group;
4516 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4517 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4518 int prev_group = sdhci_msm_get_cpu_group(msm_host,
4519 msm_host->pm_qos_prev_cpu);
4520
4521 sdhci_msm_pm_qos_irq_vote(host);
4522
4523 cpu = get_cpu();
4524 put_cpu();
4525 group = sdhci_msm_get_cpu_group(msm_host, cpu);
4526 if (group < 0)
4527 return;
4528
4529 if (group != prev_group && prev_group >= 0) {
4530 sdhci_msm_pm_qos_cpu_unvote(host,
4531 msm_host->pm_qos_prev_cpu, false);
4532 prev_group = -1; /* make sure to vote for new group */
4533 }
4534
4535 if (prev_group < 0) {
4536 sdhci_msm_pm_qos_cpu_vote(host,
4537 msm_host->pdata->pm_qos_data.latency, cpu);
4538 msm_host->pm_qos_prev_cpu = cpu;
4539 }
4540}
4541
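/* Called after a request completes: drop the IRQ and CPU group PM QoS votes. */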
4542static void sdhci_msm_post_req(struct sdhci_host *host,
4543 struct mmc_request *mmc_req)
4544{
4545 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4546 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4547
4548 sdhci_msm_pm_qos_irq_unvote(host, false);
4549
4550 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4551 msm_host->pm_qos_prev_cpu = -1;
4552}
4553
4554static void sdhci_msm_init(struct sdhci_host *host)
4555{
4556 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4557 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4558
4559 sdhci_msm_pm_qos_irq_init(host);
4560
4561 if (msm_host->pdata->pm_qos_data.legacy_valid)
4562 sdhci_msm_pm_qos_cpu_init(host,
4563 msm_host->pdata->pm_qos_data.latency);
4564}
4565
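/* Report the maximum current (in uA) supported by the VDD supply, if known. */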
Sahitya Tummala9150a942014-10-31 15:33:04 +05304566static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4567{
4568 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4569 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4570 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4571 u32 max_curr = 0;
4572
4573 if (curr_slot && curr_slot->vdd_data)
4574 max_curr = curr_slot->vdd_data->hpm_uA;
4575
4576 return max_curr;
4577}
4578
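/*
 * Scale the ICE core clock with the card load: minimum rate for
 * MMC_LOAD_LOW, maximum rate otherwise.
 */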
Sahitya Tummala073ca552015-08-06 13:59:37 +05304579static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
4580{
4581 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4582 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4583 int ret = 0;
4584 u32 clk_rate = 0;
4585
4586 if (!IS_ERR(msm_host->ice_clk)) {
4587 clk_rate = (state == MMC_LOAD_LOW) ?
4588 msm_host->pdata->ice_clk_min :
4589 msm_host->pdata->ice_clk_max;
4590 if (msm_host->ice_clk_rate == clk_rate)
4591 return 0;
4592 pr_debug("%s: changing ICE clk rate to %u\n",
4593 mmc_hostname(host->mmc), clk_rate);
4594 ret = clk_set_rate(msm_host->ice_clk, clk_rate);
4595 if (ret) {
4596 pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
4597 mmc_hostname(host->mmc), ret, clk_rate);
4598 return ret;
4599 }
4600 msm_host->ice_clk_rate = clk_rate;
4601 }
4602 return 0;
4603}
4604
Asutosh Das0ef24812012-12-18 16:14:02 +05304605static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304606 .crypto_engine_cfg = sdhci_msm_ice_cfg,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304607 .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
Veerabhadrarao Badiganti6c6b97a2017-03-08 06:51:49 +05304608 .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304609 .crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304610 .crypto_engine_reset = sdhci_msm_ice_reset,
Sahitya Tummala14613432013-03-21 11:13:25 +05304611 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05304612 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004613 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05304614 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004615 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05304616 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304617 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304618 .get_min_clock = sdhci_msm_get_min_clock,
4619 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05304620 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304621 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304622 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08004623 .set_bus_width = sdhci_set_bus_width,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304624 .reset = sdhci_msm_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004625 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05304626 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05304627 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03004628 .init = sdhci_msm_init,
4629 .pre_req = sdhci_msm_pre_req,
4630 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05304631 .get_current_limit = sdhci_msm_get_current_limit,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304632 .notify_load = sdhci_msm_notify_load,
Asutosh Das0ef24812012-12-18 16:14:02 +05304633};
4634
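/*
 * Adjust the advertised capabilities and enable version-specific quirks
 * and workarounds based on the SDCC core major/minor version.
 */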
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304635static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
4636 struct sdhci_host *host)
4637{
Krishna Konda46fd1432014-10-30 21:13:27 -07004638 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304639 u16 minor;
4640 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304641 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304642 const struct sdhci_msm_offset *msm_host_offset =
4643 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304644
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304645 version = sdhci_msm_readl_relaxed(host,
4646 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304647 major = (version & CORE_VERSION_MAJOR_MASK) >>
4648 CORE_VERSION_MAJOR_SHIFT;
4649 minor = version & CORE_VERSION_TARGET_MASK;
4650
Krishna Konda46fd1432014-10-30 21:13:27 -07004651 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
4652
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304653 /*
4654	 * Starting with the SDCC 5 controller (core major version = 1), the
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004655	 * controller won't advertise 3.0V, 1.8V and 8-bit features
4656 * except for some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304657 */
4658 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004659 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004660 /*
4661 * Enable 1.8V support capability on controllers that
4662 * support dual voltage
4663 */
4664 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07004665 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
4666 caps |= CORE_3_0V_SUPPORT;
4667 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004668 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05304669 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
4670 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304671 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004672
4673 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304674 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
4675	 * on 8992 (minor 0x3e) as a reset workaround for the data stuck issue.
4676 */
4677 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304678 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304679 val = readl_relaxed(host->ioaddr +
4680 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304681 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304682 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304683 }
4684 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004685 * SDCC 5 controller with major version 1, minor version 0x34 and later
4686 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
4687 */
4688 if ((major == 1) && (minor < 0x34))
4689 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004690
4691 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004692 * SDCC 5 controller with major version 1, minor version 0x42 and later
4693 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304694 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004695 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304696 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004697 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304698 msm_host->enhanced_strobe = true;
4699 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004700
4701 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004702 * SDCC 5 controller with major version 1 and minor version 0x42,
4703 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
4704 * gating cannot guarantee MCLK timing requirement i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304705 * when MCLK is gated OFF, it is not gated for less than 0.5us
4706	 * and MCLK must be switched on for at least 1us before DATA
4707 * starts coming.
4708 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004709 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
Veerabhadrarao Badigantib6cc5172019-06-18 18:49:41 +05304710 (minor == 0x49) || (minor == 0x4D) || (minor >= 0x6b)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304711 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004712
Pavan Anamula5a256df2015-10-16 14:38:28 +05304713	/* Fake 3.0V support for SDIO devices which require such voltage */
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05304714 if (msm_host->core_3_0v_support) {
Pavan Anamula5a256df2015-10-16 14:38:28 +05304715 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304716 writel_relaxed((readl_relaxed(host->ioaddr +
4717 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4718 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304719 }
4720
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004721 if ((major == 1) && (minor >= 0x49))
4722 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304723 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004724 * Mask 64-bit support for controller with 32-bit address bus so that
4725 * smaller descriptor size will be used and improve memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004726 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004727 if (!msm_host->pdata->largeaddressbus)
4728 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4729
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304730 writel_relaxed(caps, host->ioaddr +
4731 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004732 /* keep track of the value in SDHCI_CAPABILITIES */
4733 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304734
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304735 if ((major == 1) && (minor >= 0x6b)) {
Ritesh Harjani82124772014-11-04 15:34:00 +05304736 msm_host->ice_hci_support = true;
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304737 host->cdr_support = true;
4738 }
Veerabhadrarao Badigantib8f2b0c2018-03-14 15:21:05 +05304739
4740 if ((major == 1) && (minor >= 0x71))
4741 msm_host->need_dll_user_ctl = true;
4742
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304743}
4744
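/*
 * Attach the command queue (CMDQ) host engine, unless CMDQ support is
 * disabled on the kernel command line.
 */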
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004745#ifdef CONFIG_MMC_CQ_HCI
4746static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4747 struct platform_device *pdev)
4748{
4749 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4750 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4751
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304752 if (nocmdq) {
4753 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4754 return;
4755 }
4756
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004757 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004758 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004759 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4760 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004761 host->cq_host = NULL;
4762 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004763 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004764 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004765}
4766#else
4767static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4768 struct platform_device *pdev)
4769{
4770
4771}
4772#endif
4773
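/*
 * Return true if this controller is the boot device named by the
 * "androidboot.bootdevice=" kernel command line argument, or if that
 * argument is absent.
 */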
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004774static bool sdhci_msm_is_bootdevice(struct device *dev)
4775{
4776 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4777 strlen(saved_command_line))) {
4778 char search_string[50];
4779
4780 snprintf(search_string, ARRAY_SIZE(search_string),
4781 "androidboot.bootdevice=%s", dev_name(dev));
4782 if (strnstr(saved_command_line, search_string,
4783 strlen(saved_command_line)))
4784 return true;
4785 else
4786 return false;
4787 }
4788
4789 /*
4790	 * If the "androidboot.bootdevice=" argument is not present, then
4791	 * return true as we don't know the boot device anyway.
4792 */
4793 return true;
4794}
4795
Asutosh Das0ef24812012-12-18 16:14:02 +05304796static int sdhci_msm_probe(struct platform_device *pdev)
4797{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304798 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304799 struct sdhci_host *host;
4800 struct sdhci_pltfm_host *pltfm_host;
4801 struct sdhci_msm_host *msm_host;
4802 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004803 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004804 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004805 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304806 struct resource *tlmm_memres = NULL;
4807 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304808 unsigned long flags;
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004809 bool force_probe;
Asutosh Das0ef24812012-12-18 16:14:02 +05304810
4811 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4812 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4813 GFP_KERNEL);
4814 if (!msm_host) {
4815 ret = -ENOMEM;
4816 goto out;
4817 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304818
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304819 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4820 msm_host->mci_removed = true;
4821 msm_host->offset = &sdhci_msm_offset_mci_removed;
4822 } else {
4823 msm_host->mci_removed = false;
4824 msm_host->offset = &sdhci_msm_offset_mci_present;
4825 }
4826 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304827 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4828 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4829 if (IS_ERR(host)) {
4830 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304831 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304832 }
4833
4834 pltfm_host = sdhci_priv(host);
4835 pltfm_host->priv = msm_host;
4836 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304837 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304838
Asutosh Das1c43b132018-01-11 18:08:40 +05304839 ret = sdhci_msm_get_socrev(&pdev->dev, msm_host);
4840 if (ret == -EPROBE_DEFER) {
4841 dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
4842 goto pltfm_free;
4843 }
4844
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304845 /* get the ice device vops if present */
4846 ret = sdhci_msm_ice_get_dev(host);
4847 if (ret == -EPROBE_DEFER) {
4848 /*
4849		 * The SDHCI driver might be probed before the ICE driver is.
4850 * In that case we would like to return EPROBE_DEFER code
4851 * in order to delay its probing.
4852 */
4853 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4854 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004855 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304856
4857 } else if (ret == -ENODEV) {
4858 /*
4859 * ICE device is not enabled in DTS file. No need for further
4860 * initialization of ICE driver.
4861 */
4862 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4863 __func__);
4864 } else if (ret) {
4865 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4866 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004867 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304868 }
4869
Asutosh Das0ef24812012-12-18 16:14:02 +05304870 /* Extract platform data */
4871 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004872 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304873 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004874 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4875 ret);
4876 goto pltfm_free;
4877 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004878
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004879 /* Read property to determine if the probe is forced */
4880 force_probe = of_find_property(pdev->dev.of_node,
4881 "qcom,force-sdhc1-probe", NULL);
4882
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004883 /* skip the probe if eMMC isn't a boot device */
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004884 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
4885 && !force_probe) {
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004886 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004887 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004888 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004889
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004890 if (disable_slots & (1 << (ret - 1))) {
4891 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4892 ret);
4893 ret = -ENODEV;
4894 goto pltfm_free;
4895 }
4896
Sayali Lokhande5f768322016-04-11 18:36:53 +05304897 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004898 sdhci_slot[ret-1] = msm_host;
4899
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004900 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4901 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304902 if (!msm_host->pdata) {
4903 dev_err(&pdev->dev, "DT parsing error\n");
4904 goto pltfm_free;
4905 }
4906 } else {
4907 dev_err(&pdev->dev, "No device tree node\n");
4908 goto pltfm_free;
4909 }
4910
4911 /* Setup Clocks */
4912
4913 /* Setup SDCC bus voter clock. */
4914 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4915 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4916 /* Vote for max. clk rate for max. performance */
4917 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4918 if (ret)
4919 goto pltfm_free;
4920 ret = clk_prepare_enable(msm_host->bus_clk);
4921 if (ret)
4922 goto pltfm_free;
4923 }
4924
4925 /* Setup main peripheral bus clock */
4926 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4927 if (!IS_ERR(msm_host->pclk)) {
4928 ret = clk_prepare_enable(msm_host->pclk);
4929 if (ret)
4930 goto bus_clk_disable;
4931 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304932 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304933
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304934 /* Setup SDC ufs bus aggr clock */
4935 msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
4936 if (!IS_ERR(msm_host->bus_aggr_clk)) {
4937 ret = clk_prepare_enable(msm_host->bus_aggr_clk);
4938 if (ret) {
4939 dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
4940 goto pclk_disable;
4941 }
4942 }
4943
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304944 if (msm_host->ice.pdev) {
4945 /* Setup SDC ICE clock */
4946 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4947 if (!IS_ERR(msm_host->ice_clk)) {
4948 /* ICE core has only one clock frequency for now */
4949 ret = clk_set_rate(msm_host->ice_clk,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304950 msm_host->pdata->ice_clk_max);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304951 if (ret) {
4952 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4953 ret,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304954 msm_host->pdata->ice_clk_max);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304955 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304956 }
4957 ret = clk_prepare_enable(msm_host->ice_clk);
4958 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304959 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304960
4961 msm_host->ice_clk_rate =
Sahitya Tummala073ca552015-08-06 13:59:37 +05304962 msm_host->pdata->ice_clk_max;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304963 }
4964 }
4965
Asutosh Das0ef24812012-12-18 16:14:02 +05304966 /* Setup SDC MMC clock */
4967 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4968 if (IS_ERR(msm_host->clk)) {
4969 ret = PTR_ERR(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304970 goto bus_aggr_clk_disable;
Asutosh Das0ef24812012-12-18 16:14:02 +05304971 }
4972
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304973 /* Set to the minimum supported clock frequency */
4974 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4975 if (ret) {
4976 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304977 goto bus_aggr_clk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304978 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304979 ret = clk_prepare_enable(msm_host->clk);
4980 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304981 goto bus_aggr_clk_disable;
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304982
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304983 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304984 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304985
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004986 /* Setup CDC calibration fixed feedback clock */
4987 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4988 if (!IS_ERR(msm_host->ff_clk)) {
4989 ret = clk_prepare_enable(msm_host->ff_clk);
4990 if (ret)
4991 goto clk_disable;
4992 }
4993
4994 /* Setup CDC calibration sleep clock */
4995 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4996 if (!IS_ERR(msm_host->sleep_clk)) {
4997 ret = clk_prepare_enable(msm_host->sleep_clk);
4998 if (ret)
4999 goto ff_clk_disable;
5000 }
5001
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07005002 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
5003
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305004 ret = sdhci_msm_bus_register(msm_host, pdev);
5005 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07005006 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305007
5008 if (msm_host->msm_bus_vote.client_handle)
5009 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
5010 sdhci_msm_bus_work);
5011 sdhci_msm_bus_voting(host, 1);
5012
Asutosh Das0ef24812012-12-18 16:14:02 +05305013 /* Setup regulators */
5014 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
5015 if (ret) {
5016 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305017 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05305018 }
5019
5020 /* Reset the core and Enable SDHC mode */
5021 core_memres = platform_get_resource_byname(pdev,
5022 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305023 if (!msm_host->mci_removed) {
5024 if (!core_memres) {
5025 dev_err(&pdev->dev, "Failed to get iomem resource\n");
5026 goto vreg_deinit;
5027 }
5028 msm_host->core_mem = devm_ioremap(&pdev->dev,
5029 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05305030
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305031 if (!msm_host->core_mem) {
5032 dev_err(&pdev->dev, "Failed to remap registers\n");
5033 ret = -ENOMEM;
5034 goto vreg_deinit;
5035 }
Asutosh Das0ef24812012-12-18 16:14:02 +05305036 }
5037
Sahitya Tummala079ed852015-10-29 20:18:45 +05305038 tlmm_memres = platform_get_resource_byname(pdev,
5039 IORESOURCE_MEM, "tlmm_mem");
5040 if (tlmm_memres) {
5041 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
5042 resource_size(tlmm_memres));
5043
5044 if (!tlmm_mem) {
5045 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
5046 ret = -ENOMEM;
5047 goto vreg_deinit;
5048 }
5049 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
Sahitya Tummala079ed852015-10-29 20:18:45 +05305050 }
5051
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05305052 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07005053	 * Reset the vendor spec register to its power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05305054 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07005055 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305056 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05305057
Ritesh Harjanib5b129b2018-10-26 18:21:44 +05305058	/* This enables the ADMA error interrupt in case of a length mismatch */
5059 writel_relaxed((readl_relaxed(host->ioaddr +
5060 msm_host_offset->CORE_VENDOR_SPEC) |
5061 CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN),
5062 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
5063
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +05305064 /*
5065 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
5066 */
5067 writel_relaxed((readl_relaxed(host->ioaddr +
5068 msm_host_offset->CORE_VENDOR_SPEC3) &
5069 ~CORE_FIFO_ALT_EN), host->ioaddr +
5070 msm_host_offset->CORE_VENDOR_SPEC3);
5071
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305072 if (!msm_host->mci_removed) {
5073 /* Set HC_MODE_EN bit in HC_MODE register */
5074 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05305075
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305076 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
5077 writel_relaxed(readl_relaxed(msm_host->core_mem +
5078 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
5079 msm_host->core_mem + CORE_HC_MODE);
5080 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05305081 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07005082
5083 /*
Bao D. Nguyen2c34e7b2018-12-05 12:52:35 -08005084 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
Krishna Konda46fd1432014-10-30 21:13:27 -07005085 * be used as required later on.
5086 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305087 writel_relaxed((readl_relaxed(host->ioaddr +
5088 msm_host_offset->CORE_VENDOR_SPEC) |
5089 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
5090 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05305091 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05305092 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
5093 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
5094 * interrupt in GIC (by registering the interrupt handler), we need to
5095	 * ensure that any pending power irq interrupt status is acknowledged,
5096	 * otherwise the power irq interrupt handler would fire prematurely.
5097 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305098 irq_status = sdhci_msm_readl_relaxed(host,
5099 msm_host_offset->CORE_PWRCTL_STATUS);
5100 sdhci_msm_writel_relaxed(irq_status, host,
5101 msm_host_offset->CORE_PWRCTL_CLEAR);
5102 irq_ctl = sdhci_msm_readl_relaxed(host,
5103 msm_host_offset->CORE_PWRCTL_CTL);
5104
Subhash Jadavani28137342013-05-14 17:46:43 +05305105 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
5106 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
5107 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
5108 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305109 sdhci_msm_writel_relaxed(irq_ctl, host,
5110 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07005111
Subhash Jadavani28137342013-05-14 17:46:43 +05305112 /*
5113	 * Ensure that the above writes are propagated before interrupt enablement
5114 * in GIC.
5115 */
5116 mb();
5117
5118 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05305119 * Following are the deviations from SDHC spec v3.0 -
5120	 * 1. Card detection is handled using a separate GPIO.
5121	 * 2. Bus power control is handled by interacting with the PMIC.
5122 */
5123 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
5124 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05305125 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03005126 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05305127 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05305128 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05305129 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05305130 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05305131 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05305132 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05305133
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05305134 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
5135 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
5136
Stephen Boyd8dce5c62013-04-24 14:19:46 -07005137 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07005138 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
5139 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
5140 SDHCI_VENDOR_VER_SHIFT));
5141 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
5142 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
5143 /*
5144 * Add 40us delay in interrupt handler when
5145 * operating at initialization frequency(400KHz).
5146		 * operating at initialization frequency (400KHz).
5147 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
5148 /*
5149 * Set Software Reset for DAT line in Software
5150 * Reset Register (Bit 2).
5151 */
5152 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
5153 }
5154
Asutosh Das214b9662013-06-13 14:27:42 +05305155 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
5156
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07005157 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005158 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
5159 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05305160 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005161 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05305162 goto vreg_deinit;
5163 }
Subhash Jadavanide139e82017-09-27 11:04:40 +05305164
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005165 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05305166 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07005167 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305168 if (ret) {
5169 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005170 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05305171 goto vreg_deinit;
5172 }
5173
5174 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305175 sdhci_msm_writel_relaxed(INT_MASK, host,
5176 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05305177
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305178#ifdef CONFIG_MMC_CLKGATE
5179 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
5180 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
5181#endif
5182
Asutosh Das0ef24812012-12-18 16:14:02 +05305183 /* Set host capabilities */
5184 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
5185 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005186 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05305187 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05305188 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005189 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005190 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03005191 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05305192 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07005193 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03005194 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305195 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05305196
5197 if (msm_host->pdata->nonremovable)
5198 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
5199
Guoping Yuf7c91332014-08-20 16:56:18 +08005200 if (msm_host->pdata->nonhotplug)
5201 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
5202
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07005203 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
5204
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305205 /* Initialize ICE if present */
5206 if (msm_host->ice.pdev) {
5207 ret = sdhci_msm_ice_init(host);
5208 if (ret) {
5209 dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
5210 mmc_hostname(host->mmc), ret);
5211 ret = -EINVAL;
5212 goto vreg_deinit;
5213 }
5214 host->is_crypto_en = true;
Veerabhadrarao Badigantife3088f2018-05-22 11:48:01 +05305215 msm_host->mmc->inlinecrypt_support = true;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305216 /* Packed commands cannot be encrypted/decrypted using ICE */
5217 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
5218 MMC_CAP2_PACKED_WR_CONTROL);
5219 }
5220
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05305221 init_completion(&msm_host->pwr_irq_completion);
5222
Sahitya Tummala581df132013-03-12 14:57:46 +05305223 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05305224 /*
5225 * Set up the card detect GPIO in active configuration before
5226 * configuring it as an IRQ. Otherwise, it can be in some
5227		 * weird/inconsistent state, resulting in a flood of interrupts.
5228 */
5229 sdhci_msm_setup_pins(msm_host->pdata, true);
5230
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05305231 /*
5232 * This delay is needed for stabilizing the card detect GPIO
5233 * line after changing the pull configs.
5234 */
5235 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05305236 ret = mmc_gpio_request_cd(msm_host->mmc,
5237 msm_host->pdata->status_gpio, 0);
5238 if (ret) {
5239 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
5240 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305241 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05305242 }
5243 }
5244
Krishna Konda7feab352013-09-17 23:55:40 -07005245 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
5246 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
5247 host->dma_mask = DMA_BIT_MASK(64);
5248 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305249 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07005250 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305251 host->dma_mask = DMA_BIT_MASK(32);
5252 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305253 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305254 } else {
5255 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
5256 }
5257
Ritesh Harjani42876f42015-11-17 17:46:51 +05305258 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
5259 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05305260 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305261 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
5262 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05305263 msm_host->is_sdiowakeup_enabled = true;
5264 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
5265 sdhci_msm_sdiowakeup_irq,
5266 IRQF_SHARED | IRQF_TRIGGER_HIGH,
5267 "sdhci-msm sdiowakeup", host);
5268 if (ret) {
5269 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
5270 __func__, msm_host->pdata->sdiowakeup_irq, ret);
5271 msm_host->pdata->sdiowakeup_irq = -1;
5272 msm_host->is_sdiowakeup_enabled = false;
5273 goto vreg_deinit;
5274 } else {
5275 spin_lock_irqsave(&host->lock, flags);
5276 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305277 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305278 spin_unlock_irqrestore(&host->lock, flags);
5279 }
5280 }
5281
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07005282 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305283 ret = sdhci_add_host(host);
5284 if (ret) {
5285 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05305286 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05305287 }
5288
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05305289 msm_host->pltfm_init_done = true;
5290
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005291 pm_runtime_set_active(&pdev->dev);
5292 pm_runtime_enable(&pdev->dev);
5293 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
5294 pm_runtime_use_autosuspend(&pdev->dev);
5295
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305296 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
5297 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
5298 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
5299 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
5300 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
5301 ret = device_create_file(&pdev->dev,
5302 &msm_host->msm_bus_vote.max_bus_bw);
5303 if (ret)
5304 goto remove_host;
5305
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305306 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
5307 msm_host->polling.show = show_polling;
5308 msm_host->polling.store = store_polling;
5309 sysfs_attr_init(&msm_host->polling.attr);
5310 msm_host->polling.attr.name = "polling";
5311 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
5312 ret = device_create_file(&pdev->dev, &msm_host->polling);
5313 if (ret)
5314 goto remove_max_bus_bw_file;
5315 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05305316
5317 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
5318 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
5319 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
5320 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
5321 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
5322 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5323 if (ret) {
5324 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
5325 mmc_hostname(host->mmc), __func__, ret);
5326 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5327 }
Ankit Jain1d7e5182017-09-20 11:55:38 +05305328 if (sdhci_msm_is_bootdevice(&pdev->dev))
5329 mmc_flush_detect_work(host->mmc);
5330
Asutosh Das0ef24812012-12-18 16:14:02 +05305331 /* Successful initialization */
5332 goto out;
5333
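/*
 * Error-handling path: unwind in the reverse order of initialization.
 * Each label below releases only what was set up before the
 * corresponding failure point.
 */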
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305334remove_max_bus_bw_file:
5335 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05305336remove_host:
5337 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005338 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305339 sdhci_remove_host(host, dead);
5340vreg_deinit:
5341 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305342bus_unregister:
5343 if (msm_host->msm_bus_vote.client_handle)
5344 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5345 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07005346sleep_clk_disable:
5347 if (!IS_ERR(msm_host->sleep_clk))
5348 clk_disable_unprepare(msm_host->sleep_clk);
5349ff_clk_disable:
5350 if (!IS_ERR(msm_host->ff_clk))
5351 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305352clk_disable:
5353 if (!IS_ERR(msm_host->clk))
5354 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05305355bus_aggr_clk_disable:
5356 if (!IS_ERR(msm_host->bus_aggr_clk))
5357 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305358pclk_disable:
5359 if (!IS_ERR(msm_host->pclk))
5360 clk_disable_unprepare(msm_host->pclk);
5361bus_clk_disable:
5362 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
5363 clk_disable_unprepare(msm_host->bus_clk);
5364pltfm_free:
5365 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305366out_host_free:
5367 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305368out:
5369 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
5370 return ret;
5371}
5372
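/*
 * sdhci_msm_remove - undo everything done in probe: remove the sysfs
 * attributes, disable runtime PM, drop the PM QoS requests and their
 * unvote work, remove the SDHCI host, release regulators and pins,
 * cancel the bus bandwidth vote and free the platform host.
 */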
5373static int sdhci_msm_remove(struct platform_device *pdev)
5374{
5375 struct sdhci_host *host = platform_get_drvdata(pdev);
5376 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5377 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5378 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305379 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
5380 int i;
Asutosh Das0ef24812012-12-18 16:14:02 +05305381 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
5382 0xffffffff);
5383
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305384 pr_debug("%s: %s Enter\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305385 if (!gpio_is_valid(msm_host->pdata->status_gpio))
5386 device_remove_file(&pdev->dev, &msm_host->polling);
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305387
5388 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305389 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005390 pm_runtime_disable(&pdev->dev);
Vijay Viswanath1971d222018-03-01 12:01:47 +05305391
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305392 if (msm_host->pm_qos_group_enable) {
5393 struct sdhci_msm_pm_qos_group *group;
5394
5395 for (i = 0; i < nr_groups; i++)
5396 cancel_delayed_work_sync(
5397 &msm_host->pm_qos[i].unvote_work);
5398
5399 device_remove_file(&msm_host->pdev->dev,
5400 &msm_host->pm_qos_group_enable_attr);
5401 device_remove_file(&msm_host->pdev->dev,
5402 &msm_host->pm_qos_group_status_attr);
5403
5404 for (i = 0; i < nr_groups; i++) {
5405 group = &msm_host->pm_qos[i];
5406 pm_qos_remove_request(&group->req);
5407 }
5408 }
5409
5410 if (msm_host->pm_qos_irq.enabled) {
5411 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
5412 device_remove_file(&pdev->dev,
5413 &msm_host->pm_qos_irq.enable_attr);
5414 device_remove_file(&pdev->dev,
5415 &msm_host->pm_qos_irq.status_attr);
5416 pm_qos_remove_request(&msm_host->pm_qos_irq.req);
5417 }
5418
Vijay Viswanath1971d222018-03-01 12:01:47 +05305419 if (msm_host->pm_qos_wq)
5420 destroy_workqueue(msm_host->pm_qos_wq);
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305421
Asutosh Das0ef24812012-12-18 16:14:02 +05305422 sdhci_remove_host(host, dead);
Sahitya Tummala581df132013-03-12 14:57:46 +05305423
Asutosh Das0ef24812012-12-18 16:14:02 +05305424 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305425
Pratibhasagar V9acf2642013-11-21 21:07:21 +05305426 sdhci_msm_setup_pins(pdata, true);
5427 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305428
5429 if (msm_host->msm_bus_vote.client_handle) {
5430 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5431 sdhci_msm_bus_unregister(msm_host);
5432 }
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305433
5434 sdhci_pltfm_free(pdev);
5435
Asutosh Das0ef24812012-12-18 16:14:02 +05305436 return 0;
5437}
5438
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005439#ifdef CONFIG_PM
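/*
 * sdhci_msm_cfg_sdio_wakeup - arm or disarm the DAT1 GPIO wakeup interrupt
 * as a wake source. Only applicable when an SDIO card is present, a valid
 * wakeup IRQ was provided and the card asked to be woken by SDIO IRQs;
 * returns non-zero otherwise so callers fall back to toggling the host IRQ.
 */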
Ritesh Harjani42876f42015-11-17 17:46:51 +05305440static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
5441{
5442 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5443 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5444 unsigned long flags;
5445 int ret = 0;
5446
5447 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
5448 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
5449 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305450 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305451 return 1;
5452 }
5453
5454 spin_lock_irqsave(&host->lock, flags);
5455 if (enable) {
5456 /* configure DAT1 gpio if applicable */
5457 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305458 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305459 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5460 if (!ret)
5461 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
5462 goto out;
5463 } else {
5464 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5465 mmc_hostname(host->mmc), msm_host->pdata->sdiowakeup_irq);
5466 }
5467 } else {
5468 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
5469 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5470 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305471 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305472 } else {
5473 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5474 mmc_hostname(host->mmc), msm_host->pdata->sdiowakeup_irq);
5475
5476 }
5477 }
5478out:
5479 if (ret)
5480 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
5481 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
5482 ret, msm_host->pdata->sdiowakeup_irq);
5483 spin_unlock_irqrestore(&host->lock, flags);
5484 return ret;
5485}
5486
5487
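/*
 * Runtime suspend: keep the host IRQ enabled for SDIO cards so card
 * interrupts can still be delivered, otherwise disable it. Also disable
 * the power IRQ, drop the bus bandwidth vote if the clocks are already
 * off, and suspend the ICE crypto engine when crypto is enabled.
 */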
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005488static int sdhci_msm_runtime_suspend(struct device *dev)
5489{
5490 struct sdhci_host *host = dev_get_drvdata(dev);
5491 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5492 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005493 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305494 int ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005495
Ritesh Harjani42876f42015-11-17 17:46:51 +05305496 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5497 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05305498
Ritesh Harjani42876f42015-11-17 17:46:51 +05305499 sdhci_cfg_irq(host, false, true);
5500
5501defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005502 disable_irq(msm_host->pwr_irq);
5503
5504 /*
5505 * Remove the bus vote immediately, but only if the clocks are already
5506 * off: in that case work may have been queued to drop the vote, and it
5507 * might not complete before runtime suspend or system suspend.
5508 */
5509 if (!atomic_read(&msm_host->clks_on)) {
5510 if (msm_host->msm_bus_vote.client_handle)
5511 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5512 }
5513
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305514 if (host->is_crypto_en) {
5515 ret = sdhci_msm_ice_suspend(host);
5516 if (ret < 0)
5517 pr_err("%s: failed to suspend crypto engine %d\n",
5518 mmc_hostname(host->mmc), ret);
5519 }
Konstantin Dorfman98edaa12015-06-11 10:05:18 +03005520 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
5521 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005522 return 0;
5523}
5524
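/*
 * Runtime resume: when crypto is enabled, bring the controller clock up
 * and resume the ICE engine first; then re-enable the host IRQ (skipped
 * for SDIO cards, where it was never disabled) and the power IRQ.
 */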
5525static int sdhci_msm_runtime_resume(struct device *dev)
5526{
5527 struct sdhci_host *host = dev_get_drvdata(dev);
5528 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5529 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005530 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305531 int ret;
5532
5533 if (host->is_crypto_en) {
5534 ret = sdhci_msm_enable_controller_clock(host);
5535 if (ret) {
5536 pr_err("%s: Failed to enable required clocks\n",
5537 mmc_hostname(host->mmc));
5538 goto skip_ice_resume;
5539 }
5540 ret = sdhci_msm_ice_resume(host);
5541 if (ret)
5542 pr_err("%s: failed to resume crypto engine %d\n",
5543 mmc_hostname(host->mmc), ret);
5544 }
5545skip_ice_resume:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005546
Ritesh Harjani42876f42015-11-17 17:46:51 +05305547 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5548 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05305549
Ritesh Harjani42876f42015-11-17 17:46:51 +05305550 sdhci_cfg_irq(host, true, true);
5551
5552defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005553 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005554
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005555 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
5556 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005557 return 0;
5558}
5559
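/*
 * System suspend: mask the card-detect IRQ, run runtime suspend unless
 * the device is already runtime suspended, cancel pending clock-gating
 * work, gate the controller clock and, for SDIO cards, arm the DAT1
 * wakeup interrupt (disabling the host IRQ if that is not possible).
 */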
5560static int sdhci_msm_suspend(struct device *dev)
5561{
5562 struct sdhci_host *host = dev_get_drvdata(dev);
5563 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5564 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhande507e3592018-02-19 16:38:26 +05305565 struct mmc_host *mmc = host->mmc;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005566 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305567 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005568 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005569
5570 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
5571 (msm_host->mmc->slot.cd_irq >= 0))
5572 disable_irq(msm_host->mmc->slot.cd_irq);
5573
5574 if (pm_runtime_suspended(dev)) {
5575 pr_debug("%s: %s: already runtime suspended\n",
5576 mmc_hostname(host->mmc), __func__);
5577 goto out;
5578 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005579 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005580out:
Sayali Lokhande507e3592018-02-19 16:38:26 +05305581 /* cancel any clock gating work scheduled by mmc_host_clk_release() */
5582 cancel_delayed_work_sync(&mmc->clk_gate_work);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05305583 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05305584 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
5585 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
5586 if (sdio_cfg)
5587 sdhci_cfg_irq(host, false, true);
5588 }
5589
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005590 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
5591 ktime_to_us(ktime_sub(ktime_get(), start)));
5592 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005593}
5594
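/*
 * System resume: unmask the card-detect IRQ, run runtime resume unless
 * the device was left runtime suspended and, for SDIO cards, disarm the
 * DAT1 wakeup interrupt (re-enabling the host IRQ if the wakeup path was
 * not in use).
 */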
5595static int sdhci_msm_resume(struct device *dev)
5596{
5597 struct sdhci_host *host = dev_get_drvdata(dev);
5598 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5599 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5600 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305601 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005602 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005603
5604 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
5605 (msm_host->mmc->slot.cd_irq >= 0))
5606 enable_irq(msm_host->mmc->slot.cd_irq);
5607
5608 if (pm_runtime_suspended(dev)) {
5609 pr_debug("%s: %s: runtime suspended, defer system resume\n",
5610 mmc_hostname(host->mmc), __func__);
5611 goto out;
5612 }
5613
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005614 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005615out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05305616 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
5617 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
5618 if (sdio_cfg)
5619 sdhci_cfg_irq(host, true, true);
5620 }
5621
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005622 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
5623 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005624 return ret;
5625}
5626
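/*
 * Late (noirq) suspend check: abort the suspend with -EAGAIN if the
 * clocks are still on (e.g. ksdioirqd kept them running) or with -EBUSY
 * if SDIO wakeup handling is still pending, so that it can be retried.
 */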
Ritesh Harjani42876f42015-11-17 17:46:51 +05305627static int sdhci_msm_suspend_noirq(struct device *dev)
5628{
5629 struct sdhci_host *host = dev_get_drvdata(dev);
5630 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5631 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5632 int ret = 0;
5633
5634 /*
5635 * ksdioirqd may still be running, so abort this suspend attempt
5636 * (it will be retried) if the clocks are still on.
5637 */
5638 if (atomic_read(&msm_host->clks_on)) {
5639 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
5640 mmc_hostname(host->mmc), __func__);
5641 ret = -EAGAIN;
5642 }
5643
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305644 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5645 if (msm_host->sdio_pending_processing)
5646 ret = -EBUSY;
5647
Ritesh Harjani42876f42015-11-17 17:46:51 +05305648 return ret;
5649}
5650
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005651static const struct dev_pm_ops sdhci_msm_pmops = {
Vijay Viswanathd8936f82017-07-20 15:50:19 +05305652 SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005653 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
5654 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05305655 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005656};
5657
5658#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
5659
5660#else
5661#define SDHCI_MSM_PMOPS NULL
5662#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05305663static const struct of_device_id sdhci_msm_dt_match[] = {
5664 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305665 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07005666 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05305667};
5668MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
5669
5670static struct platform_driver sdhci_msm_driver = {
5671 .probe = sdhci_msm_probe,
5672 .remove = sdhci_msm_remove,
5673 .driver = {
5674 .name = "sdhci_msm",
5675 .owner = THIS_MODULE,
Lingutla Chandrasekhare73832d2016-09-07 15:59:56 +05305676 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
Asutosh Das0ef24812012-12-18 16:14:02 +05305677 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005678 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05305679 },
5680};
5681
5682module_platform_driver(sdhci_msm_driver);
5683
5684MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
5685MODULE_LICENSE("GPL v2");