/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <linux/nvmem-consumer.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define SDHCI_VER_100		0x2B

#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)

#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_LOCK		(1 << 7)
#define CORE_DDR_DLL_LOCK	(1 << 11)

#define CORE_CLK_PWRSAVE		(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		(1 << 18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
#define HC_SW_RST_REQ		(1 << 21)
#define CORE_ONE_MID_EN		(1 << 25)

#define CORE_8_BIT_SUPPORT		(1 << 18)
#define CORE_3_3V_SUPPORT		(1 << 24)
#define CORE_3_0V_SUPPORT		(1 << 25)
#define CORE_1_8V_SUPPORT		(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_FIFO_ALT_EN		(1 << 10)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

#define CORE_DDR_CAL_EN		(1 << 0)
#define CORE_FLL_CYCLE_CNT	(1 << 18)
#define CORE_DLL_CLOCK_DISABLE	(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY		115
#define DDR_CONFIG_2_POR_VAL		0x80040873
#define DLL_USR_CTL_POR_VAL		0x10800
#define ENABLE_DLL_LOCK_STATUS		(1 << 26)

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
#define TCXO_FREQ		19200000

#define INVALID_TUNING_PHASE	-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES		16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100

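/*
 * Vendor-specific register offsets differ between newer SDCC cores where
 * the legacy MCI register space has been removed (offsets live in the SDHC
 * register space, host->ioaddr) and older cores where it is still present
 * (offsets live in the separate core_mem region). Common code looks up the
 * correct offset through msm_host->offset, which points at one of the two
 * tables below.
 */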
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
	u32 CORE_DLL_USR_CTL; /* Present on SDCC5.1 onwards */
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
	.CORE_DLL_USR_CTL = 0x388,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};

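/*
 * Accessors for vendor-specific registers: each one resolves the base
 * address to either the SDHC register space (host->ioaddr) or the legacy
 * core_mem region, depending on whether the MCI block has been removed
 * on this core.
 */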
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

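/*
 * Reference tuning patterns that the card returns for the tuning command:
 * tuning_block_64 for 4-bit transfers and tuning_block_128 for 8-bit bus
 * width (selected further below for HS200 on an 8-bit bus).
 */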
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set to whatever is passed in voltage_level (the third argument)
	 * of sdhci_msm_set_vdd_io_vol().
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */
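/*
 * Example: if phases {0, 1, 2} and {10..15} pass, the two windows wrap
 * around phase 15 -> 0 and are merged into the single nine-phase window
 * {10..15, 0, 1, 2}; the 3/4 point of that window selects phase 15.
 */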

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 208000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		if ((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~(0xFF << 10)) | (mclk_freq << 10)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/*
	 * Configure the DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->need_dll_user_ctl) {
		writel_relaxed(DLL_USR_CTL_POR_VAL | ENABLE_DLL_LOCK_STATUS,
			host->ioaddr + msm_host_offset->CORE_DLL_USR_CTL);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

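/*
 * CDCLP533 hardware calibration, used for HS400 on controllers that
 * provide the CDC delay circuit (msm_host->use_cdclp533): programs the
 * CDC CSR block, triggers calibration and polls CALIBRATION_DONE before
 * restarting CDC traffic.
 */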
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_CDC_T4_DLY_SEL),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		& ~CORE_CDC_SWITCH_BYPASS_OFF),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		| CORE_CDC_SWITCH_RC_EN),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		& ~CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_HW_AUTOCAL_ENA),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
		host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		| CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

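/*
 * CM_DLL_SDC4 calibration for HS400: reprograms DDR_CONFIG (RCLK delay),
 * optionally enables CMDIN_RCLK for enhanced strobe, sets DDR_CAL_EN and
 * polls for DDR_DLL_LOCK in DLL_STATUS.
 */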
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogram the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->pdata->rclk_wa) {
		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG_2)
		| CORE_DDR_CAL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee the above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

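/*
 * HS400 enhanced strobe setup: resets the tuning block and runs the
 * CM_DLL_SDC4 calibration once per card; skipped unless both host and
 * card advertise enhanced strobe support and the timing is HS400.
 */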
static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
			mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

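/*
 * HS400 DLL calibration: re-initializes the DLL, restores the tuning
 * phase saved during HS200 tuning, then calibrates either the CDCLP533
 * or the CM_DLL_SDC4 block depending on the controller.
 */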
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

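/*
 * Issue a CMD6 (SWITCH) that rewrites the EXT_CSD HS_TIMING byte with the
 * requested driver strength in the upper nibble while keeping HS200
 * timing (0x2) in the lower nibble.
 */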
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
		mmc_hostname(mmc), __func__,
		drv_type);
}

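/*
 * Platform tuning: for each of the 16 DLL output phases, send the tuning
 * command (CMD19/CMD21) and compare the returned block against the
 * expected pattern; the passing phases are then handed to
 * msm_find_most_appropriate_phase() and the chosen phase is programmed
 * into the DLL. Up to three full sequences are attempted, optionally
 * varying the card drive strength if every phase passes.
 */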
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;
	u8 last_good_phase = 0;

	/*
	 * Tuning is required only for SDR104, HS200 and HS400 modes, and
	 * only if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during the tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			/*
			 * Set the dll to last known good phase while sending
			 * status command to ensure that status command won't
			 * fail due to bad phase.
			 */
			if (tuned_phase_cnt)
				last_good_phase =
					tuned_phases[tuned_phase_cnt-1];
			else if (msm_host->saved_tuning_phase !=
					INVALID_TUNING_PHASE)
				last_good_phase = msm_host->saved_tuning_phase;

			rc = msm_config_cm_dll_phase(host, last_good_phase);
			if (rc)
				goto kfree;

			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
					(R1_CURRENT_STATE(sts_cmd.resp[0])
					!= R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			}
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			/* Ignore CRC errors that occurred during tuning */
			if (cmd.error)
				mmc->err_stats[MMC_ERR_CMD_CRC]--;
			else if (data.error)
				mmc->err_stats[MMC_ERR_DAT_CRC]--;
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then it's a problem. So change the card's
		 * drive type to a different value, if supported, and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
			tuned_phase_cnt);

		/* set drive type to another value; default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
					drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
			tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
			mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

Asutosh Das0ef24812012-12-18 16:14:02 +05301385static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1386{
1387 struct sdhci_msm_gpio_data *curr;
1388 int i, ret = 0;
1389
1390 curr = pdata->pin_data->gpio_data;
1391 for (i = 0; i < curr->size; i++) {
1392 if (!gpio_is_valid(curr->gpio[i].no)) {
1393 ret = -EINVAL;
1394 pr_err("%s: Invalid gpio = %d\n", __func__,
1395 curr->gpio[i].no);
1396 goto free_gpios;
1397 }
1398 if (enable) {
1399 ret = gpio_request(curr->gpio[i].no,
1400 curr->gpio[i].name);
1401 if (ret) {
1402 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1403 __func__, curr->gpio[i].no,
1404 curr->gpio[i].name, ret);
1405 goto free_gpios;
1406 }
1407 curr->gpio[i].is_enabled = true;
1408 } else {
1409 gpio_free(curr->gpio[i].no);
1410 curr->gpio[i].is_enabled = false;
1411 }
1412 }
1413 return ret;
1414
1415free_gpios:
1416 for (i--; i >= 0; i--) {
1417 gpio_free(curr->gpio[i].no);
1418 curr->gpio[i].is_enabled = false;
1419 }
1420 return ret;
1421}
1422
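/*
 * sdhci_msm_config_pinctrl_drv_type() below picks a pad drive-strength
 * pinctrl state based on the requested bus clock:
 *   clock > 150 MHz -> "ds_200MHz"
 *   clock >  75 MHz -> "ds_100MHz"
 *   clock > 400 kHz -> "ds_50MHz"
 *   otherwise       -> "ds_400KHz"
 * States that were not provided in the device tree are left NULL by
 * sdhci_msm_parse_pinctrl_info() and are simply skipped here.
 */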
Can Guob903ad82017-10-17 13:22:53 +08001423static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
1424 unsigned int clock)
1425{
1426 int ret = 0;
1427
1428 if (clock > 150000000) {
1429 if (pdata->pctrl_data->pins_drv_type_200MHz)
1430 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1431 pdata->pctrl_data->pins_drv_type_200MHz);
1432 } else if (clock > 75000000) {
1433 if (pdata->pctrl_data->pins_drv_type_100MHz)
1434 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1435 pdata->pctrl_data->pins_drv_type_100MHz);
1436 } else if (clock > 400000) {
1437 if (pdata->pctrl_data->pins_drv_type_50MHz)
1438 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1439 pdata->pctrl_data->pins_drv_type_50MHz);
1440 } else {
1441 if (pdata->pctrl_data->pins_drv_type_400KHz)
1442 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1443 pdata->pctrl_data->pins_drv_type_400KHz);
1444 }
1445
1446 return ret;
1447}
1448
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301449static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1450 bool enable)
1451{
1452 int ret = 0;
1453
1454 if (enable)
1455 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1456 pdata->pctrl_data->pins_active);
1457 else
1458 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1459 pdata->pctrl_data->pins_sleep);
1460
1461 if (ret < 0)
1462 pr_err("%s state for pinctrl failed with %d\n",
1463 enable ? "Enabling" : "Disabling", ret);
1464
1465 return ret;
1466}
1467
Asutosh Das0ef24812012-12-18 16:14:02 +05301468static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1469{
1470 int ret = 0;
1471
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301472 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301473 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301474 } else if (pdata->pctrl_data) {
1475 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1476 goto out;
1477 } else if (!pdata->pin_data) {
1478 return 0;
1479 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301480
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301481 if (pdata->pin_data->is_gpio)
1482 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301483out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301484 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301485 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301486
1487 return ret;
1488}
1489
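/*
 * sdhci_msm_dt_get_array() is a helper that copies a u32 array property
 * from the host's device-tree node into a devm-allocated buffer and
 * returns the element count through @len; a non-zero @size caps the
 * number of elements accepted. As a sketch (values are illustrative
 * only), a property such as
 *   qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
 * would come back as a five-entry array.
 */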
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301490static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1491 u32 **out, int *len, u32 size)
1492{
1493 int ret = 0;
1494 struct device_node *np = dev->of_node;
1495 size_t sz;
1496 u32 *arr = NULL;
1497
1498 if (!of_get_property(np, prop_name, len)) {
1499 ret = -EINVAL;
1500 goto out;
1501 }
1502 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001503 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301504 dev_err(dev, "%s invalid size\n", prop_name);
1505 ret = -EINVAL;
1506 goto out;
1507 }
1508
1509 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1510 if (!arr) {
1511 dev_err(dev, "%s failed allocating memory\n", prop_name);
1512 ret = -ENOMEM;
1513 goto out;
1514 }
1515
1516 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1517 if (ret < 0) {
1518 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1519 goto out;
1520 }
1521 *out = arr;
1522out:
1523 if (ret)
1524 *len = 0;
1525 return ret;
1526}
1527
Asutosh Das0ef24812012-12-18 16:14:02 +05301528#define MAX_PROP_SIZE 32
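/*
 * sdhci_msm_dt_parse_vreg_info() builds one sdhci_msm_reg_data from the
 * "<name>-supply" phandle plus the optional qcom,<name>-always-on,
 * qcom,<name>-lpm-sup, qcom,<name>-voltage-level and
 * qcom,<name>-current-level properties. A sketch of the expected
 * bindings, with a purely illustrative phandle and values:
 *
 *   vdd-supply = <&some_ldo>;
 *   qcom,vdd-voltage-level = <2950000 2950000>;
 *   qcom,vdd-current-level = <200 800000>;
 *
 * The two-cell voltage property maps to the low/high voltage levels and
 * the two-cell current property to the LPM/HPM load limits (in uA).
 */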
1529static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1530 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1531{
1532 int len, ret = 0;
1533 const __be32 *prop;
1534 char prop_name[MAX_PROP_SIZE];
1535 struct sdhci_msm_reg_data *vreg;
1536 struct device_node *np = dev->of_node;
1537
1538 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1539 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301540 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301541 return ret;
1542 }
1543
1544 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1545 if (!vreg) {
1546 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1547 ret = -ENOMEM;
1548 return ret;
1549 }
1550
1551 vreg->name = vreg_name;
1552
1553 snprintf(prop_name, MAX_PROP_SIZE,
1554 "qcom,%s-always-on", vreg_name);
1555 if (of_get_property(np, prop_name, NULL))
1556 vreg->is_always_on = true;
1557
1558 snprintf(prop_name, MAX_PROP_SIZE,
1559 "qcom,%s-lpm-sup", vreg_name);
1560 if (of_get_property(np, prop_name, NULL))
1561 vreg->lpm_sup = true;
1562
1563 snprintf(prop_name, MAX_PROP_SIZE,
1564 "qcom,%s-voltage-level", vreg_name);
1565 prop = of_get_property(np, prop_name, &len);
1566 if (!prop || (len != (2 * sizeof(__be32)))) {
1567 dev_warn(dev, "%s %s property\n",
1568 prop ? "invalid format" : "no", prop_name);
1569 } else {
1570 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1571 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1572 }
1573
1574 snprintf(prop_name, MAX_PROP_SIZE,
1575 "qcom,%s-current-level", vreg_name);
1576 prop = of_get_property(np, prop_name, &len);
1577 if (!prop || (len != (2 * sizeof(__be32)))) {
1578 dev_warn(dev, "%s %s property\n",
1579 prop ? "invalid format" : "no", prop_name);
1580 } else {
1581 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1582 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1583 }
1584
1585 *vreg_data = vreg;
1586 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1587 vreg->name, vreg->is_always_on ? "always_on," : "",
1588 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1589 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1590
1591 return ret;
1592}
1593
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301594static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1595 struct sdhci_msm_pltfm_data *pdata)
1596{
1597 struct sdhci_pinctrl_data *pctrl_data;
1598 struct pinctrl *pctrl;
1599 int ret = 0;
1600
1601 /* Try to obtain pinctrl handle */
1602 pctrl = devm_pinctrl_get(dev);
1603 if (IS_ERR(pctrl)) {
1604 ret = PTR_ERR(pctrl);
1605 goto out;
1606 }
1607 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1608 if (!pctrl_data) {
1609 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1610 ret = -ENOMEM;
1611 goto out;
1612 }
1613 pctrl_data->pctrl = pctrl;
1614 /* Look-up and keep the states handy to be used later */
1615 pctrl_data->pins_active = pinctrl_lookup_state(
1616 pctrl_data->pctrl, "active");
1617 if (IS_ERR(pctrl_data->pins_active)) {
1618 ret = PTR_ERR(pctrl_data->pins_active);
1619 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1620 goto out;
1621 }
1622 pctrl_data->pins_sleep = pinctrl_lookup_state(
1623 pctrl_data->pctrl, "sleep");
1624 if (IS_ERR(pctrl_data->pins_sleep)) {
1625 ret = PTR_ERR(pctrl_data->pins_sleep);
1626 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1627 goto out;
1628 }
Can Guob903ad82017-10-17 13:22:53 +08001629
1630 pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
1631 pctrl_data->pctrl, "ds_400KHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301632 if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001633		dev_dbg(dev, "Could not get 400K pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_400KHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301634 pctrl_data->pins_drv_type_400KHz = NULL;
1635 }
Can Guob903ad82017-10-17 13:22:53 +08001636
1637 pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
1638 pctrl_data->pctrl, "ds_50MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301639 if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001640		dev_dbg(dev, "Could not get 50M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_50MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301641 pctrl_data->pins_drv_type_50MHz = NULL;
1642 }
Can Guob903ad82017-10-17 13:22:53 +08001643
1644 pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
1645 pctrl_data->pctrl, "ds_100MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301646 if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001647		dev_dbg(dev, "Could not get 100M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_100MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301648 pctrl_data->pins_drv_type_100MHz = NULL;
1649 }
Can Guob903ad82017-10-17 13:22:53 +08001650
1651 pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
1652 pctrl_data->pctrl, "ds_200MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301653 if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001654		dev_dbg(dev, "Could not get 200M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_200MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301655 pctrl_data->pins_drv_type_200MHz = NULL;
1656 }
Can Guob903ad82017-10-17 13:22:53 +08001657
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301658 pdata->pctrl_data = pctrl_data;
1659out:
1660 return ret;
1661}
1662
Asutosh Das0ef24812012-12-18 16:14:02 +05301663#define GPIO_NAME_MAX_LEN 32
1664static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1665 struct sdhci_msm_pltfm_data *pdata)
1666{
1667 int ret = 0, cnt, i;
1668 struct sdhci_msm_pin_data *pin_data;
1669 struct device_node *np = dev->of_node;
1670
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301671 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1672 if (!ret) {
1673 goto out;
1674 } else if (ret == -EPROBE_DEFER) {
1675 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1676 goto out;
1677 } else {
1678 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1679 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301680 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301681 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301682 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1683 if (!pin_data) {
1684 dev_err(dev, "No memory for pin_data\n");
1685 ret = -ENOMEM;
1686 goto out;
1687 }
1688
1689 cnt = of_gpio_count(np);
1690 if (cnt > 0) {
1691 pin_data->gpio_data = devm_kzalloc(dev,
1692 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1693 if (!pin_data->gpio_data) {
1694 dev_err(dev, "No memory for gpio_data\n");
1695 ret = -ENOMEM;
1696 goto out;
1697 }
1698 pin_data->gpio_data->size = cnt;
1699 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1700 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1701
1702 if (!pin_data->gpio_data->gpio) {
1703 dev_err(dev, "No memory for gpio\n");
1704 ret = -ENOMEM;
1705 goto out;
1706 }
1707
1708 for (i = 0; i < cnt; i++) {
1709 const char *name = NULL;
1710 char result[GPIO_NAME_MAX_LEN];
1711 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1712 of_property_read_string_index(np,
1713 "qcom,gpio-names", i, &name);
1714
1715 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1716 dev_name(dev), name ? name : "?");
1717 pin_data->gpio_data->gpio[i].name = result;
1718 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1719 pin_data->gpio_data->gpio[i].name,
1720 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301721 }
1722 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301723 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301724out:
1725 if (ret)
1726 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1727 return ret;
1728}
1729
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001730#ifdef CONFIG_SMP
1731static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1732{
1733 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1734}
1735#else
1736static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1737#endif
1738
Gilad Bronerc788a672015-09-08 15:39:11 +03001739static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1740 struct sdhci_msm_pltfm_data *pdata)
1741{
1742 struct device_node *np = dev->of_node;
1743 const char *str;
1744 u32 cpu;
1745 int ret = 0;
1746 int i;
1747
1748 pdata->pm_qos_data.irq_valid = false;
1749 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1750 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1751 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001752 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001753 }
1754
1755 /* must specify cpu for "affine_cores" type */
1756 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1757 pdata->pm_qos_data.irq_cpu = -1;
1758 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1759 if (ret) {
1760 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1761 ret);
1762 goto out;
1763 }
1764 if (cpu < 0 || cpu >= num_possible_cpus()) {
1765 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1766 __func__, cpu, num_possible_cpus());
1767 ret = -EINVAL;
1768 goto out;
1769 }
1770 pdata->pm_qos_data.irq_cpu = cpu;
1771 }
1772
1773 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1774 SDHCI_POWER_POLICY_NUM) {
1775 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1776 __func__, SDHCI_POWER_POLICY_NUM);
1777 ret = -EINVAL;
1778 goto out;
1779 }
1780
1781 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1782 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1783 &pdata->pm_qos_data.irq_latency.latency[i]);
1784
1785 pdata->pm_qos_data.irq_valid = true;
1786out:
1787 return ret;
1788}
1789
1790static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1791 struct sdhci_msm_pltfm_data *pdata)
1792{
1793 struct device_node *np = dev->of_node;
1794 u32 mask;
1795 int nr_groups;
1796 int ret;
1797 int i;
1798
1799 /* Read cpu group mapping */
1800 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1801 if (nr_groups <= 0) {
1802 ret = -EINVAL;
1803 goto out;
1804 }
1805 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1806 pdata->pm_qos_data.cpu_group_map.mask =
1807 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1808 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1809 ret = -ENOMEM;
1810 goto out;
1811 }
1812
1813 for (i = 0; i < nr_groups; i++) {
1814 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1815 i, &mask);
1816
1817 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1818 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1819 cpu_possible_mask)) {
1820 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1821 __func__, mask, i);
1822 ret = -EINVAL;
1823 goto free_res;
1824 }
1825 }
1826 return 0;
1827
1828free_res:
1829 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1830out:
1831 return ret;
1832}
1833
1834static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1835 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1836{
1837 struct device_node *np = dev->of_node;
1838 struct sdhci_msm_pm_qos_latency *values;
1839 int ret;
1840 int i;
1841 int group;
1842 int cfg;
1843
1844 ret = of_property_count_u32_elems(np, name);
1845 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1846 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1847 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1848 ret);
1849 return -EINVAL;
1850 } else if (ret < 0) {
1851 return ret;
1852 }
1853
1854 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1855 GFP_KERNEL);
1856 if (!values)
1857 return -ENOMEM;
1858
1859 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1860 group = i / SDHCI_POWER_POLICY_NUM;
1861 cfg = i % SDHCI_POWER_POLICY_NUM;
1862 of_property_read_u32_index(np, name, i,
1863 &(values[group].latency[cfg]));
1864 }
1865
1866 *latency = values;
1867 return 0;
1868}
1869
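/*
 * sdhci_msm_pm_qos_parse() and the helpers above consume the PM QoS
 * device-tree bindings. A sketch of what a node might carry, with
 * illustrative latency values and assuming SDHCI_POWER_POLICY_NUM == 2
 * (so each group contributes two latency entries):
 *
 *   qcom,pm-qos-irq-type = "affine_cores";
 *   qcom,pm-qos-irq-cpu = <0>;
 *   qcom,pm-qos-irq-latency = <70 70>;
 *   qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *   qcom,pm-qos-cmdq-latency-us = <70 70>, <70 70>;
 *   qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
 *
 * If either the IRQ or the cpu-group parse fails, the corresponding
 * PM QoS voting is disabled with a notice rather than being treated
 * as a fatal error.
 */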
1870static void sdhci_msm_pm_qos_parse(struct device *dev,
1871 struct sdhci_msm_pltfm_data *pdata)
1872{
1873 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1874 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1875 __func__);
1876
1877 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1878 pdata->pm_qos_data.cmdq_valid =
1879 !sdhci_msm_pm_qos_parse_latency(dev,
1880 "qcom,pm-qos-cmdq-latency-us",
1881 pdata->pm_qos_data.cpu_group_map.nr_groups,
1882 &pdata->pm_qos_data.cmdq_latency);
1883 pdata->pm_qos_data.legacy_valid =
1884 !sdhci_msm_pm_qos_parse_latency(dev,
1885 "qcom,pm-qos-legacy-latency-us",
1886 pdata->pm_qos_data.cpu_group_map.nr_groups,
1887 &pdata->pm_qos_data.latency);
1888 if (!pdata->pm_qos_data.cmdq_valid &&
1889 !pdata->pm_qos_data.legacy_valid) {
1890 /* clean-up previously allocated arrays */
1891 kfree(pdata->pm_qos_data.latency);
1892 kfree(pdata->pm_qos_data.cmdq_latency);
1893 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1894 __func__);
1895 }
1896 } else {
1897 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1898 __func__);
1899 }
1900}
1901
Asutosh Das1c43b132018-01-11 18:08:40 +05301902#ifdef CONFIG_NVMEM
1903/* Parse qfprom data for deciding on errata work-arounds */
1904static long qfprom_read(struct device *dev, const char *name)
1905{
1906 struct nvmem_cell *cell;
1907 ssize_t len = 0;
1908 u32 *buf, val = 0;
1909 long err = 0;
1910
1911 cell = nvmem_cell_get(dev, name);
1912 if (IS_ERR(cell)) {
1913 err = PTR_ERR(cell);
1914 dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
1915 /* If entry does not exist, then that is not an error */
1916 if (err == -ENOENT)
1917 err = 0;
1918 return err;
1919 }
1920
1921 buf = (u32 *)nvmem_cell_read(cell, &len);
1922 if (IS_ERR(buf) || !len) {
1923 		dev_err(dev, "Failed reading nvmem cell, err: %ld, bytes fetched: %zd\n",
1924 				IS_ERR(buf) ? PTR_ERR(buf) : 0L, len);
1925 if (!IS_ERR(buf)) {
1926 kfree(buf);
1927 err = -EINVAL;
1928 } else {
1929 err = PTR_ERR(buf);
1930 }
1931 } else {
Asutosh Dasb8614aa2018-01-31 15:44:15 +05301932 /*
1933 		 * 30 bits starting at bit offset 0 are read;
1934 		 * we're only interested in bits 29:28.
1935 */
1936 val = (*buf >> 28) & 0x3;
Asutosh Das1c43b132018-01-11 18:08:40 +05301937 kfree(buf);
1938 }
1939
1940 nvmem_cell_put(cell);
1941 return err ? err : (long) val;
1942}
1943
1944/* Reads the SoC version */
1945static int sdhci_msm_get_socrev(struct device *dev,
1946 struct sdhci_msm_host *msm_host)
1947{
1948
1949 msm_host->soc_min_rev = qfprom_read(dev, "minor_rev");
1950
1951 if (msm_host->soc_min_rev < 0)
1952 dev_err(dev, "failed getting soc_min_rev, err : %d\n",
1953 msm_host->soc_min_rev);
1954 return msm_host->soc_min_rev;
1955}
1956#else
1957/* Reads the SoC version */
1958static int sdhci_msm_get_socrev(struct device *dev,
1959 struct sdhci_msm_host *msm_host)
1960{
1961 return 0;
1962}
1963#endif
1964
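/*
 * sdhci_msm_populate_pdata() below gathers the remaining per-slot
 * configuration from device tree: cd-gpios, qcom,bus-width, the
 * mandatory qcom,clk-rates table, the optional qcom,bus-aggr-clk-rates
 * and (when an ICE device is present) qcom,ice-clk-rates tables, the
 * vdd/vdd-io regulator bindings, pinctrl/GPIO data, the
 * qcom,bus-speed-mode strings, and assorted flags such as
 * qcom,nonremovable and qcom,ddr-config. Any fatal parse error makes
 * it return NULL.
 */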
Asutosh Das0ef24812012-12-18 16:14:02 +05301965/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001966static
1967struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1968 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301969{
1970 struct sdhci_msm_pltfm_data *pdata = NULL;
1971 struct device_node *np = dev->of_node;
1972 u32 bus_width = 0;
1973 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301974 int clk_table_len;
1975 u32 *clk_table = NULL;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301976 int ice_clk_table_len;
1977 u32 *ice_clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301978 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301979 const char *lower_bus_speed = NULL;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05301980 int bus_clk_table_len;
1981 u32 *bus_clk_table = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301982
1983 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1984 if (!pdata) {
1985 dev_err(dev, "failed to allocate memory for platform data\n");
1986 goto out;
1987 }
1988
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301989 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
Bao D. Nguyen0f5ac952017-06-14 12:42:41 -07001990 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301991 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301992
Asutosh Das0ef24812012-12-18 16:14:02 +05301993 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1994 if (bus_width == 8)
1995 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1996 else if (bus_width == 4)
1997 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1998 else {
1999 		dev_notice(dev, "invalid bus-width, defaulting to 1-bit mode\n");
2000 pdata->mmc_bus_width = 0;
2001 }
2002
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002003 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05302004 &msm_host->mmc->clk_scaling.pltfm_freq_table,
2005 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002006 pr_debug("%s: no clock scaling frequencies were supplied\n",
2007 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05302008 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
2009 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
2010 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002011
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05302012 /*
2013 	 * A few hosts can support DDR52 mode at the same lower
2014 * system voltage corner as high-speed mode. In such cases,
2015 * it is always better to put it in DDR mode which will
2016 * improve the performance without any power impact.
2017 */
2018 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
2019 &lower_bus_speed)) {
2020 if (!strcmp(lower_bus_speed, "DDR52"))
2021 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
2022 MMC_SCALING_LOWER_DDR52_MODE;
2023 }
2024
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302025 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
2026 &clk_table, &clk_table_len, 0)) {
2027 dev_err(dev, "failed parsing supported clock rates\n");
2028 goto out;
2029 }
2030 if (!clk_table || !clk_table_len) {
2031 dev_err(dev, "Invalid clock table\n");
2032 goto out;
2033 }
2034 pdata->sup_clk_table = clk_table;
2035 pdata->sup_clk_cnt = clk_table_len;
2036
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05302037 if (!sdhci_msm_dt_get_array(dev, "qcom,bus-aggr-clk-rates",
2038 &bus_clk_table, &bus_clk_table_len, 0)) {
2039 if (bus_clk_table && bus_clk_table_len) {
2040 pdata->bus_clk_table = bus_clk_table;
2041 pdata->bus_clk_cnt = bus_clk_table_len;
2042 }
2043 }
2044
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302045 if (msm_host->ice.pdev) {
2046 if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
2047 &ice_clk_table, &ice_clk_table_len, 0)) {
2048 dev_err(dev, "failed parsing supported ice clock rates\n");
2049 goto out;
2050 }
2051 if (!ice_clk_table || !ice_clk_table_len) {
2052 			dev_err(dev, "Invalid ICE clock table\n");
2053 goto out;
2054 }
Sahitya Tummala073ca552015-08-06 13:59:37 +05302055 if (ice_clk_table_len != 2) {
2056 dev_err(dev, "Need max and min frequencies in the table\n");
2057 goto out;
2058 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302059 pdata->sup_ice_clk_table = ice_clk_table;
2060 pdata->sup_ice_clk_cnt = ice_clk_table_len;
Sahitya Tummala073ca552015-08-06 13:59:37 +05302061 pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
2062 pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
2063 dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
2064 pdata->ice_clk_max, pdata->ice_clk_min);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302065 }
2066
Asutosh Das0ef24812012-12-18 16:14:02 +05302067 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
2068 sdhci_msm_slot_reg_data),
2069 GFP_KERNEL);
2070 if (!pdata->vreg_data) {
2071 dev_err(dev, "failed to allocate memory for vreg data\n");
2072 goto out;
2073 }
2074
2075 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
2076 "vdd")) {
2077 dev_err(dev, "failed parsing vdd data\n");
2078 goto out;
2079 }
2080 if (sdhci_msm_dt_parse_vreg_info(dev,
2081 &pdata->vreg_data->vdd_io_data,
2082 "vdd-io")) {
2083 dev_err(dev, "failed parsing vdd-io data\n");
2084 goto out;
2085 }
2086
2087 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
2088 dev_err(dev, "failed parsing gpio data\n");
2089 goto out;
2090 }
2091
Asutosh Das0ef24812012-12-18 16:14:02 +05302092 len = of_property_count_strings(np, "qcom,bus-speed-mode");
2093
2094 for (i = 0; i < len; i++) {
2095 const char *name = NULL;
2096
2097 of_property_read_string_index(np,
2098 "qcom,bus-speed-mode", i, &name);
2099 if (!name)
2100 continue;
2101
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002102 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
2103 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
2104 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
2105 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
2106 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05302107 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2108 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
2109 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2110 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
2111 pdata->caps |= MMC_CAP_1_8V_DDR
2112 | MMC_CAP_UHS_DDR50;
2113 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
2114 pdata->caps |= MMC_CAP_1_2V_DDR
2115 | MMC_CAP_UHS_DDR50;
2116 }
2117
2118 if (of_get_property(np, "qcom,nonremovable", NULL))
2119 pdata->nonremovable = true;
2120
Guoping Yuf7c91332014-08-20 16:56:18 +08002121 if (of_get_property(np, "qcom,nonhotplug", NULL))
2122 pdata->nonhotplug = true;
2123
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08002124 pdata->largeaddressbus =
2125 of_property_read_bool(np, "qcom,large-address-bus");
2126
Dov Levenglickc9033ab2015-03-10 16:00:56 +02002127 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
2128 msm_host->mmc->wakeup_on_idle = true;
2129
Gilad Bronerc788a672015-09-08 15:39:11 +03002130 sdhci_msm_pm_qos_parse(dev, pdata);
2131
Pavan Anamula5a256df2015-10-16 14:38:28 +05302132 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302133 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05302134
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002135 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002136 msm_host->regs_restore.is_supported =
2137 of_property_read_bool(np, "qcom,restore-after-cx-collapse");
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002138
Vijay Viswanatha5492612017-10-17 15:38:55 +05302139 if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
2140 pdata->rclk_wa = true;
2141
Asutosh Das1c43b132018-01-11 18:08:40 +05302142 /*
2143 	 * rclk_wa is not required if the SoC version is known and
2144 	 * is not the base version.
2145 */
2146 if (msm_host->soc_min_rev != 0)
2147 pdata->rclk_wa = false;
2148
Asutosh Das0ef24812012-12-18 16:14:02 +05302149 return pdata;
2150out:
2151 return NULL;
2152}
2153
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302154/* Returns required bandwidth in Bytes per Sec */
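/*
 * The controller clock rate is treated as the byte rate of an 8-bit
 * bus and then scaled down by bus width. With an illustrative 200 MHz
 * clock this works out to ~200 MB/s on an 8-bit bus, ~100 MB/s on a
 * 4-bit bus and ~25 MB/s on a 1-bit bus.
 */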
2155static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
2156 struct mmc_ios *ios)
2157{
Sahitya Tummala2886c922013-04-03 18:03:31 +05302158 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2159 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2160
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302161 unsigned int bw;
2162
Sahitya Tummala2886c922013-04-03 18:03:31 +05302163 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302164 /*
2165 	 * For DDR mode, the SDCC controller clock runs at double
2166 	 * the rate of the actual clock that goes to the card.
2167 */
2168 if (ios->bus_width == MMC_BUS_WIDTH_4)
2169 bw /= 2;
2170 else if (ios->bus_width == MMC_BUS_WIDTH_1)
2171 bw /= 8;
2172
2173 return bw;
2174}
2175
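/*
 * sdhci_msm_bus_get_vote_for_bw() maps a bandwidth requirement to an
 * index into the qcom,bus-bw-vectors-bps table: it returns the first
 * entry that is >= the requested bandwidth, or the last entry when the
 * request exceeds every vector. Index 0 is therefore the minimum
 * (typically zero-bandwidth) vote, unless the sysfs max-bandwidth
 * override is set, in which case the maximum vote is returned directly.
 */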
2176static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
2177 unsigned int bw)
2178{
2179 unsigned int *table = host->pdata->voting_data->bw_vecs;
2180 unsigned int size = host->pdata->voting_data->bw_vecs_size;
2181 int i;
2182
2183 if (host->msm_bus_vote.is_max_bw_needed && bw)
2184 return host->msm_bus_vote.max_bw_vote;
2185
2186 for (i = 0; i < size; i++) {
2187 if (bw <= table[i])
2188 break;
2189 }
2190
2191 if (i && (i == size))
2192 i--;
2193
2194 return i;
2195}
2196
2197/*
2198  * This function must be called with the host lock held.
2199  * The caller should also ensure that the msm bus client
2200  * handle is not NULL.
2201 */
2202static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
2203 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302204 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302205{
2206 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2207 int rc = 0;
2208
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302209 BUG_ON(!flags);
2210
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302211 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302212 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302213 rc = msm_bus_scale_client_update_request(
2214 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302215 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302216 if (rc) {
2217 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2218 mmc_hostname(host->mmc),
2219 msm_host->msm_bus_vote.client_handle, vote, rc);
2220 goto out;
2221 }
2222 msm_host->msm_bus_vote.curr_vote = vote;
2223 }
2224out:
2225 return rc;
2226}
2227
2228/*
2229 * Internal work. Work to set 0 bandwidth for msm bus.
2230 */
2231static void sdhci_msm_bus_work(struct work_struct *work)
2232{
2233 struct sdhci_msm_host *msm_host;
2234 struct sdhci_host *host;
2235 unsigned long flags;
2236
2237 msm_host = container_of(work, struct sdhci_msm_host,
2238 msm_bus_vote.vote_work.work);
2239 host = platform_get_drvdata(msm_host->pdev);
2240
2241 if (!msm_host->msm_bus_vote.client_handle)
2242 return;
2243
2244 spin_lock_irqsave(&host->lock, flags);
2245 /* don't vote for 0 bandwidth if any request is in progress */
2246 if (!host->mrq) {
2247 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302248 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302249 } else
2250 		pr_warning("%s: %s: Transfer in progress, skipping bus vote for 0 bandwidth\n",
2251 mmc_hostname(host->mmc), __func__);
2252 spin_unlock_irqrestore(&host->lock, flags);
2253}
2254
2255/*
2256 * This function cancels any scheduled delayed work and sets the bus
2257 * vote based on bw (bandwidth) argument.
2258 */
2259static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2260 unsigned int bw)
2261{
2262 int vote;
2263 unsigned long flags;
2264 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2265 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2266
2267 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2268 spin_lock_irqsave(&host->lock, flags);
2269 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302270 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302271 spin_unlock_irqrestore(&host->lock, flags);
2272}
2273
2274#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2275
2276 /* This function queues work that will set the bandwidth requirement to 0 */
2277static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2278{
2279 unsigned long flags;
2280 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2281 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2282
2283 spin_lock_irqsave(&host->lock, flags);
2284 if (msm_host->msm_bus_vote.min_bw_vote !=
2285 msm_host->msm_bus_vote.curr_vote)
2286 queue_delayed_work(system_wq,
2287 &msm_host->msm_bus_vote.vote_work,
2288 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2289 spin_unlock_irqrestore(&host->lock, flags);
2290}
2291
2292static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2293 struct platform_device *pdev)
2294{
2295 int rc = 0;
2296 struct msm_bus_scale_pdata *bus_pdata;
2297
2298 struct sdhci_msm_bus_voting_data *data;
2299 struct device *dev = &pdev->dev;
2300
2301 data = devm_kzalloc(dev,
2302 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2303 if (!data) {
2304 dev_err(&pdev->dev,
2305 "%s: failed to allocate memory\n", __func__);
2306 rc = -ENOMEM;
2307 goto out;
2308 }
2309 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2310 if (data->bus_pdata) {
2311 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2312 &data->bw_vecs, &data->bw_vecs_size, 0);
2313 if (rc) {
2314 dev_err(&pdev->dev,
2315 "%s: Failed to get bus-bw-vectors-bps\n",
2316 __func__);
2317 goto out;
2318 }
2319 host->pdata->voting_data = data;
2320 }
2321 if (host->pdata->voting_data &&
2322 host->pdata->voting_data->bus_pdata &&
2323 host->pdata->voting_data->bw_vecs &&
2324 host->pdata->voting_data->bw_vecs_size) {
2325
2326 bus_pdata = host->pdata->voting_data->bus_pdata;
2327 host->msm_bus_vote.client_handle =
2328 msm_bus_scale_register_client(bus_pdata);
2329 if (!host->msm_bus_vote.client_handle) {
2330 			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2331 rc = -EFAULT;
2332 goto out;
2333 }
2334 /* cache the vote index for minimum and maximum bandwidth */
2335 host->msm_bus_vote.min_bw_vote =
2336 sdhci_msm_bus_get_vote_for_bw(host, 0);
2337 host->msm_bus_vote.max_bw_vote =
2338 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2339 } else {
2340 devm_kfree(dev, data);
2341 }
2342
2343out:
2344 return rc;
2345}
2346
2347static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2348{
2349 if (host->msm_bus_vote.client_handle)
2350 msm_bus_scale_unregister_client(
2351 host->msm_bus_vote.client_handle);
2352}
2353
2354static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2355{
2356 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2357 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2358 struct mmc_ios *ios = &host->mmc->ios;
2359 unsigned int bw;
2360
2361 if (!msm_host->msm_bus_vote.client_handle)
2362 return;
2363
2364 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302365 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302366 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302367 } else {
2368 /*
2369 * If clock gating is enabled, then remove the vote
2370 * immediately because clocks will be disabled only
2371 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2372 * additional delay is required to remove the bus vote.
2373 */
2374#ifdef CONFIG_MMC_CLKGATE
2375 if (host->mmc->clkgate_delay)
2376 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2377 else
2378#endif
2379 sdhci_msm_bus_queue_work(host);
2380 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302381}
2382
Asutosh Das0ef24812012-12-18 16:14:02 +05302383/* Regulator utility functions */
2384static int sdhci_msm_vreg_init_reg(struct device *dev,
2385 struct sdhci_msm_reg_data *vreg)
2386{
2387 int ret = 0;
2388
2389 	/* Check if the regulator is already initialized */
2390 if (vreg->reg)
2391 goto out;
2392
2393 /* Get the regulator handle */
2394 vreg->reg = devm_regulator_get(dev, vreg->name);
2395 if (IS_ERR(vreg->reg)) {
2396 ret = PTR_ERR(vreg->reg);
2397 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2398 __func__, vreg->name, ret);
2399 goto out;
2400 }
2401
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302402 if (regulator_count_voltages(vreg->reg) > 0) {
2403 vreg->set_voltage_sup = true;
2404 /* sanity check */
2405 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2406 pr_err("%s: %s invalid constraints specified\n",
2407 __func__, vreg->name);
2408 ret = -EINVAL;
2409 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302410 }
2411
2412out:
2413 return ret;
2414}
2415
2416static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2417{
2418 if (vreg->reg)
2419 devm_regulator_put(vreg->reg);
2420}
2421
2422static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2423 *vreg, int uA_load)
2424{
2425 int ret = 0;
2426
2427 /*
2428 * regulators that do not support regulator_set_voltage also
2429 * do not support regulator_set_optimum_mode
2430 */
2431 if (vreg->set_voltage_sup) {
2432 ret = regulator_set_load(vreg->reg, uA_load);
2433 if (ret < 0)
2434 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2435 __func__, vreg->name, uA_load, ret);
2436 else
2437 /*
2438 			 * regulator_set_load() can return a non-zero
2439 			 * value even on success.
2440 */
2441 ret = 0;
2442 }
2443 return ret;
2444}
2445
2446static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2447 int min_uV, int max_uV)
2448{
2449 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302450 if (vreg->set_voltage_sup) {
2451 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2452 if (ret) {
2453 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302454 __func__, vreg->name, min_uV, max_uV, ret);
2455 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302456 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302457
2458 return ret;
2459}
2460
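/*
 * Regulator enable/disable sequence: enabling first raises the load
 * vote to the HPM level and programs the high voltage level before
 * calling regulator_enable(). Disabling a non-always-on supply turns
 * the regulator off, drops the load vote to 0 and relaxes the minimum
 * voltage to 0; an always-on supply is instead switched to LPM, but
 * only if it advertises qcom,<name>-lpm-sup.
 */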
2461static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2462{
2463 int ret = 0;
2464
2465 /* Put regulator in HPM (high power mode) */
2466 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2467 if (ret < 0)
2468 return ret;
2469
2470 if (!vreg->is_enabled) {
2471 /* Set voltage level */
2472 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2473 vreg->high_vol_level);
2474 if (ret)
2475 return ret;
2476 }
2477 ret = regulator_enable(vreg->reg);
2478 if (ret) {
2479 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2480 __func__, vreg->name, ret);
2481 return ret;
2482 }
2483 vreg->is_enabled = true;
2484 return ret;
2485}
2486
2487static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2488{
2489 int ret = 0;
2490
2491 /* Never disable regulator marked as always_on */
2492 if (vreg->is_enabled && !vreg->is_always_on) {
2493 ret = regulator_disable(vreg->reg);
2494 if (ret) {
2495 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2496 __func__, vreg->name, ret);
2497 goto out;
2498 }
2499 vreg->is_enabled = false;
2500
2501 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2502 if (ret < 0)
2503 goto out;
2504
2505 /* Set min. voltage level to 0 */
2506 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2507 if (ret)
2508 goto out;
2509 } else if (vreg->is_enabled && vreg->is_always_on) {
2510 if (vreg->lpm_sup) {
2511 /* Put always_on regulator in LPM (low power mode) */
2512 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2513 vreg->lpm_uA);
2514 if (ret < 0)
2515 goto out;
2516 }
2517 }
2518out:
2519 return ret;
2520}
2521
2522static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2523 bool enable, bool is_init)
2524{
2525 int ret = 0, i;
2526 struct sdhci_msm_slot_reg_data *curr_slot;
2527 struct sdhci_msm_reg_data *vreg_table[2];
2528
2529 curr_slot = pdata->vreg_data;
2530 if (!curr_slot) {
2531 		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2532 __func__);
2533 goto out;
2534 }
2535
2536 vreg_table[0] = curr_slot->vdd_data;
2537 vreg_table[1] = curr_slot->vdd_io_data;
2538
2539 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2540 if (vreg_table[i]) {
2541 if (enable)
2542 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2543 else
2544 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2545 if (ret)
2546 goto out;
2547 }
2548 }
2549out:
2550 return ret;
2551}
2552
Asutosh Das0ef24812012-12-18 16:14:02 +05302553/* This init function should be called only once for each SDHC slot */
2554static int sdhci_msm_vreg_init(struct device *dev,
2555 struct sdhci_msm_pltfm_data *pdata,
2556 bool is_init)
2557{
2558 int ret = 0;
2559 struct sdhci_msm_slot_reg_data *curr_slot;
2560 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2561
2562 curr_slot = pdata->vreg_data;
2563 if (!curr_slot)
2564 goto out;
2565
2566 curr_vdd_reg = curr_slot->vdd_data;
2567 curr_vdd_io_reg = curr_slot->vdd_io_data;
2568
2569 if (!is_init)
2570 /* Deregister all regulators from regulator framework */
2571 goto vdd_io_reg_deinit;
2572
2573 /*
2574 * Get the regulator handle from voltage regulator framework
2575 * and then try to set the voltage level for the regulator
2576 */
2577 if (curr_vdd_reg) {
2578 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2579 if (ret)
2580 goto out;
2581 }
2582 if (curr_vdd_io_reg) {
2583 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2584 if (ret)
2585 goto vdd_reg_deinit;
2586 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302587
Asutosh Das0ef24812012-12-18 16:14:02 +05302588 if (ret)
2589 dev_err(dev, "vreg reset failed (%d)\n", ret);
2590 goto out;
2591
2592vdd_io_reg_deinit:
2593 if (curr_vdd_io_reg)
2594 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2595vdd_reg_deinit:
2596 if (curr_vdd_reg)
2597 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2598out:
2599 return ret;
2600}
2601
2602
2603static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2604 enum vdd_io_level level,
2605 unsigned int voltage_level)
2606{
2607 int ret = 0;
2608 int set_level;
2609 struct sdhci_msm_reg_data *vdd_io_reg;
2610
2611 if (!pdata->vreg_data)
2612 return ret;
2613
2614 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2615 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2616 switch (level) {
2617 case VDD_IO_LOW:
2618 set_level = vdd_io_reg->low_vol_level;
2619 break;
2620 case VDD_IO_HIGH:
2621 set_level = vdd_io_reg->high_vol_level;
2622 break;
2623 case VDD_IO_SET_LEVEL:
2624 set_level = voltage_level;
2625 break;
2626 default:
2627 pr_err("%s: invalid argument level = %d",
2628 __func__, level);
2629 ret = -EINVAL;
2630 return ret;
2631 }
2632 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2633 set_level);
2634 }
2635 return ret;
2636}
2637
Ritesh Harjani42876f42015-11-17 17:46:51 +05302638/*
2639 * Acquire spin-lock host->lock before calling this function
2640 */
2641static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2642 bool enable)
2643{
2644 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2645 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2646
2647 if (enable && !msm_host->is_sdiowakeup_enabled)
2648 enable_irq(msm_host->pdata->sdiowakeup_irq);
2649 else if (!enable && msm_host->is_sdiowakeup_enabled)
2650 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2651 else
2652 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2653 __func__, enable, msm_host->is_sdiowakeup_enabled);
2654 msm_host->is_sdiowakeup_enabled = enable;
2655}
2656
2657static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2658{
2659 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302660 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2661 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2662
Ritesh Harjani42876f42015-11-17 17:46:51 +05302663 unsigned long flags;
2664
2665 pr_debug("%s: irq (%d) received\n", __func__, irq);
2666
2667 spin_lock_irqsave(&host->lock, flags);
2668 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2669 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302670 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302671
2672 return IRQ_HANDLED;
2673}
2674
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302675void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2676{
2677 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2678 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302679 const struct sdhci_msm_offset *msm_host_offset =
2680 msm_host->offset;
Siba Prasad0196fe42017-06-27 15:13:27 +05302681 unsigned int irq_flags = 0;
2682 struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302683
Siba Prasad0196fe42017-06-27 15:13:27 +05302684 if (pwr_irq_desc)
2685 irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
2686 state_use_accessors);
2687
2688 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302689 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302690 sdhci_msm_readl_relaxed(host,
2691 msm_host_offset->CORE_PWRCTL_STATUS),
2692 sdhci_msm_readl_relaxed(host,
2693 msm_host_offset->CORE_PWRCTL_MASK),
2694 sdhci_msm_readl_relaxed(host,
Siba Prasad0196fe42017-06-27 15:13:27 +05302695 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
2696
2697 MMC_TRACE(host->mmc,
2698 "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
2699 __func__,
2700 sdhci_msm_readb_relaxed(host,
2701 msm_host_offset->CORE_PWRCTL_STATUS),
2702 sdhci_msm_readb_relaxed(host,
2703 msm_host_offset->CORE_PWRCTL_MASK),
2704 sdhci_msm_readb_relaxed(host,
2705 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302706}
2707
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002708static int sdhci_msm_clear_pwrctl_status(struct sdhci_host *host, u8 value)
2709{
2710 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2711 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2712 const struct sdhci_msm_offset *msm_host_offset = msm_host->offset;
2713 int ret = 0, retry = 10;
2714
2715 /*
2716 * There is a rare HW scenario where the first clear pulse could be
2717 * lost when actual reset and clear/read of status register is
2718 	 * happening at the same time. Hence, retry up to 10 times to make
2719 * sure status register is cleared. Otherwise, this will result in
2720 * a spurious power IRQ resulting in system instability.
2721 */
2722 do {
2723 if (retry == 0) {
2724 			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
2725 mmc_hostname(host->mmc), value);
2726 sdhci_msm_dump_pwr_ctrl_regs(host);
2727 WARN_ON(1);
2728 ret = -EBUSY;
2729 break;
2730 }
2731
2732 /*
2733 * Clear the PWRCTL_STATUS interrupt bits by writing to the
2734 * corresponding bits in the PWRCTL_CLEAR register.
2735 */
2736 sdhci_msm_writeb_relaxed(value, host,
2737 msm_host_offset->CORE_PWRCTL_CLEAR);
2738 /*
2739 * SDHC has core_mem and hc_mem device memory and these memory
2740 * addresses do not fall within 1KB region. Hence, any update
2741 * to core_mem address space would require an mb() to ensure
2742 * this gets completed before its next update to registers
2743 * within hc_mem.
2744 */
2745 mb();
2746 retry--;
2747 udelay(10);
2748 } while (value & sdhci_msm_readb_relaxed(host,
2749 msm_host_offset->CORE_PWRCTL_STATUS));
2750
2751 return ret;
2752}
2753
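/*
 * Power IRQ flow: the controller raises this interrupt when the core
 * requests a bus power or IO voltage change. The handler reads
 * CORE_PWRCTL_STATUS, clears it, performs the regulator/pinctrl work
 * for BUS_ON/BUS_OFF and the vdd-io switch for IO_LOW/IO_HIGH, then
 * acknowledges via the SUCCESS/FAIL bits in CORE_PWRCTL_CTL. The
 * recorded pwr/io state and the completion signalled here are what
 * sdhci_msm_check_power_status() waits on.
 */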
Asutosh Das0ef24812012-12-18 16:14:02 +05302754static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2755{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002756 struct sdhci_host *host = (struct sdhci_host *)data;
2757 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2758 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302759 const struct sdhci_msm_offset *msm_host_offset =
2760 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302761 u8 irq_status = 0;
2762 u8 irq_ack = 0;
2763 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302764 int pwr_state = 0, io_level = 0;
2765 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05302766
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302767 irq_status = sdhci_msm_readb_relaxed(host,
2768 msm_host_offset->CORE_PWRCTL_STATUS);
2769
Asutosh Das0ef24812012-12-18 16:14:02 +05302770 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2771 mmc_hostname(msm_host->mmc), irq, irq_status);
2772
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002773 sdhci_msm_clear_pwrctl_status(host, irq_status);
Asutosh Das0ef24812012-12-18 16:14:02 +05302774
2775 	/* Handle BUS ON/OFF */
2776 if (irq_status & CORE_PWRCTL_BUS_ON) {
2777 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302778 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302779 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302780 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2781 VDD_IO_HIGH, 0);
2782 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302783 if (ret)
2784 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2785 else
2786 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302787
2788 pwr_state = REQ_BUS_ON;
2789 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302790 }
2791 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302792 if (msm_host->pltfm_init_done)
2793 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2794 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302795 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302796 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302797 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2798 VDD_IO_LOW, 0);
2799 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302800 if (ret)
2801 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2802 else
2803 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302804
2805 pwr_state = REQ_BUS_OFF;
2806 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302807 }
2808 /* Handle IO LOW/HIGH */
2809 if (irq_status & CORE_PWRCTL_IO_LOW) {
2810 /* Switch voltage Low */
2811 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2812 if (ret)
2813 irq_ack |= CORE_PWRCTL_IO_FAIL;
2814 else
2815 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302816
2817 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302818 }
2819 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2820 /* Switch voltage High */
2821 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2822 if (ret)
2823 irq_ack |= CORE_PWRCTL_IO_FAIL;
2824 else
2825 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302826
2827 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302828 }
2829
2830 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302831 sdhci_msm_writeb_relaxed(irq_ack, host,
2832 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302833 /*
2834 * SDHC has core_mem and hc_mem device memory and these memory
2835 * addresses do not fall within 1KB region. Hence, any update to
2836 * core_mem address space would require an mb() to ensure this gets
2837 * completed before its next update to registers within hc_mem.
2838 */
2839 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302840 if ((io_level & REQ_IO_HIGH) &&
2841 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2842 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302843 writel_relaxed((readl_relaxed(host->ioaddr +
2844 msm_host_offset->CORE_VENDOR_SPEC) &
2845 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2846 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002847 else if ((io_level & REQ_IO_LOW) ||
2848 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302849 writel_relaxed((readl_relaxed(host->ioaddr +
2850 msm_host_offset->CORE_VENDOR_SPEC) |
2851 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2852 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002853 mb();
2854
Asutosh Das0ef24812012-12-18 16:14:02 +05302855 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2856 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302857 spin_lock_irqsave(&host->lock, flags);
2858 if (pwr_state)
2859 msm_host->curr_pwr_state = pwr_state;
2860 if (io_level)
2861 msm_host->curr_io_level = io_level;
2862 complete(&msm_host->pwr_irq_completion);
2863 spin_unlock_irqrestore(&host->lock, flags);
2864
Asutosh Das0ef24812012-12-18 16:14:02 +05302865 return IRQ_HANDLED;
2866}
2867
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302868static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302869show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2870{
2871 struct sdhci_host *host = dev_get_drvdata(dev);
2872 int poll;
2873 unsigned long flags;
2874
2875 spin_lock_irqsave(&host->lock, flags);
2876 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2877 spin_unlock_irqrestore(&host->lock, flags);
2878
2879 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2880}
2881
2882static ssize_t
2883store_polling(struct device *dev, struct device_attribute *attr,
2884 const char *buf, size_t count)
2885{
2886 struct sdhci_host *host = dev_get_drvdata(dev);
2887 int value;
2888 unsigned long flags;
2889
2890 if (!kstrtou32(buf, 0, &value)) {
2891 spin_lock_irqsave(&host->lock, flags);
2892 if (value) {
2893 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2894 mmc_detect_change(host->mmc, 0);
2895 } else {
2896 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2897 }
2898 spin_unlock_irqrestore(&host->lock, flags);
2899 }
2900 return count;
2901}
2902
2903static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302904show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2905 char *buf)
2906{
2907 struct sdhci_host *host = dev_get_drvdata(dev);
2908 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2909 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2910
2911 return snprintf(buf, PAGE_SIZE, "%u\n",
2912 msm_host->msm_bus_vote.is_max_bw_needed);
2913}
2914
2915static ssize_t
2916store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2917 const char *buf, size_t count)
2918{
2919 struct sdhci_host *host = dev_get_drvdata(dev);
2920 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2921 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2922 uint32_t value;
2923 unsigned long flags;
2924
2925 if (!kstrtou32(buf, 0, &value)) {
2926 spin_lock_irqsave(&host->lock, flags);
2927 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2928 spin_unlock_irqrestore(&host->lock, flags);
2929 }
2930 return count;
2931}
2932
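/*
 * Wait for the power IRQ handler to service the requested bus power or
 * IO voltage change. Requests that cannot raise the IRQ (no switchable
 * signalling support, or an IO-high request issued before the controller
 * is powered up) return immediately; a timeout dumps the PWRCTL registers.
 */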
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302933static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302934{
2935 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2936 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302937 const struct sdhci_msm_offset *msm_host_offset =
2938 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302939 unsigned long flags;
2940 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302941 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302942
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302943 spin_lock_irqsave(&host->lock, flags);
2944 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2945 mmc_hostname(host->mmc), __func__, req_type,
2946 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302947 if (!msm_host->mci_removed)
2948 io_sig_sts = sdhci_msm_readl_relaxed(host,
2949 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302950
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302951 /*
2952 * The IRQ for request type IO High/Low will be generated when -
2953 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2954	 * 2. 1 is true and there is a state change in the 1.8V enable
2955	 * bit (bit 3) of the SDHCI_HOST_CONTROL2 register. The reset state of
2956	 * that bit is 0, which indicates 3.3V IO voltage. So, when the MMC core
2957 * layer tries to set it to 3.3V before card detection happens, the
2958 * IRQ doesn't get triggered as there is no state change in this bit.
2959 * The driver already handles this case by changing the IO voltage
2960	 * level to high as part of the controller power-up sequence. Hence, check
2961	 * host->pwr to handle the case where an IO voltage high request is
2962	 * issued even before the controller is powered up.
2963 */
2964 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2965 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2966 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2967 pr_debug("%s: do not wait for power IRQ that never comes\n",
2968 mmc_hostname(host->mmc));
2969 spin_unlock_irqrestore(&host->lock, flags);
2970 return;
2971 }
2972 }
2973
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302974 if ((req_type & msm_host->curr_pwr_state) ||
2975 (req_type & msm_host->curr_io_level))
2976 done = true;
2977 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302978
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302979 /*
2980	 * This is needed here to handle a case where the IRQ gets
2981	 * triggered even before this function is called, so that
2982	 * the x->done counter of the completion gets reset. Otherwise, the
2983	 * next call to wait_for_completion returns immediately
2984 * without actually waiting for the IRQ to be handled.
2985 */
2986 if (done)
2987 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302988 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
Siba Prasad0196fe42017-06-27 15:13:27 +05302989 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
Ritesh Harjani82124772014-11-04 15:34:00 +05302990 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2991 mmc_hostname(host->mmc), req_type);
Siba Prasad0196fe42017-06-27 15:13:27 +05302992 MMC_TRACE(host->mmc,
2993 "%s: request(%d) timed out waiting for pwr_irq\n",
2994 __func__, req_type);
2995 sdhci_msm_dump_pwr_ctrl_regs(host);
2996 }
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302997 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2998 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302999}
3000
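/*
 * Enable or disable the Clock Data Recovery (CDR) circuit: enabling sets
 * CDR_EN and clears CDR_EXT_EN in DLL_CONFIG, disabling does the opposite.
 */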
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003001static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
3002{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303003 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3004 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3005 const struct sdhci_msm_offset *msm_host_offset =
3006 msm_host->offset;
3007 u32 config = readl_relaxed(host->ioaddr +
3008 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303009
3010 if (enable) {
3011 config |= CORE_CDR_EN;
3012 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303013 writel_relaxed(config, host->ioaddr +
3014 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303015 } else {
3016 config &= ~CORE_CDR_EN;
3017 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303018 writel_relaxed(config, host->ioaddr +
3019 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303020 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003021}
3022
Asutosh Das648f9d12013-01-10 21:11:04 +05303023static unsigned int sdhci_msm_max_segs(void)
3024{
3025 return SDHCI_MSM_MAX_SEGMENTS;
3026}
3027
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303028static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303029{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303030 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3031 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303032
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303033 return msm_host->pdata->sup_clk_table[0];
3034}
3035
3036static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
3037{
3038 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3039 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3040 int max_clk_index = msm_host->pdata->sup_clk_cnt;
3041
3042 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
3043}
3044
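/*
 * Return the highest supported clock rate that does not exceed req_clk,
 * or the minimum supported rate when req_clk is below it.
 */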
3045static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
3046 u32 req_clk)
3047{
3048 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3049 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3050 unsigned int sel_clk = -1;
3051 unsigned char cnt;
3052
3053 if (req_clk < sdhci_msm_get_min_clock(host)) {
3054 sel_clk = sdhci_msm_get_min_clock(host);
3055 return sel_clk;
3056 }
3057
3058 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
3059 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
3060 break;
3061 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
3062 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3063 break;
3064 } else {
3065 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3066 }
3067 }
3068 return sel_clk;
3069}
3070
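/*
 * Map the selected apps (SDCC) clock rate to the corresponding bus
 * aggregate clock rate using the parallel bus_clk_table; fall back to the
 * lowest entry if the two clock tables differ in length.
 */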
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303071static long sdhci_msm_get_bus_aggr_clk_rate(struct sdhci_host *host,
3072 u32 apps_clk)
3073{
3074 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3075 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3076 long sel_clk = -1;
3077 unsigned char cnt;
3078
3079 if (msm_host->pdata->bus_clk_cnt != msm_host->pdata->sup_clk_cnt) {
3080 pr_err("%s: %s: mismatch between bus_clk_cnt(%u) and apps_clk_cnt(%u)\n",
3081 mmc_hostname(host->mmc), __func__,
3082 (unsigned int)msm_host->pdata->bus_clk_cnt,
3083 (unsigned int)msm_host->pdata->sup_clk_cnt);
3084 return msm_host->pdata->bus_clk_table[0];
3085 }
3086 if (apps_clk == sdhci_msm_get_min_clock(host)) {
3087 sel_clk = msm_host->pdata->bus_clk_table[0];
3088 return sel_clk;
3089 }
3090
3091 for (cnt = 0; cnt < msm_host->pdata->bus_clk_cnt; cnt++) {
3092 if (msm_host->pdata->sup_clk_table[cnt] > apps_clk)
3093 break;
3094 sel_clk = msm_host->pdata->bus_clk_table[cnt];
3095 }
3096 return sel_clk;
3097}
3098
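/*
 * Save the vendor-specific and standard SDHC registers that may be lost
 * while the controller clocks are gated, so that
 * sdhci_msm_registers_restore() can reprogram them on the next enable.
 */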
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003099static void sdhci_msm_registers_save(struct sdhci_host *host)
3100{
3101 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3102 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3103 const struct sdhci_msm_offset *msm_host_offset =
3104 msm_host->offset;
3105
3106 if (!msm_host->regs_restore.is_supported)
3107 return;
3108
3109 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
3110 msm_host_offset->CORE_VENDOR_SPEC);
3111 msm_host->regs_restore.vendor_pwrctl_mask =
3112 readl_relaxed(host->ioaddr +
3113 msm_host_offset->CORE_PWRCTL_MASK);
3114 msm_host->regs_restore.vendor_func2 =
3115 readl_relaxed(host->ioaddr +
3116 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3117 msm_host->regs_restore.vendor_func3 =
3118 readl_relaxed(host->ioaddr +
3119 msm_host_offset->CORE_VENDOR_SPEC3);
3120 msm_host->regs_restore.hc_2c_2e =
3121 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
3122 msm_host->regs_restore.hc_3c_3e =
3123 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
3124 msm_host->regs_restore.vendor_pwrctl_ctl =
3125 readl_relaxed(host->ioaddr +
3126 msm_host_offset->CORE_PWRCTL_CTL);
3127 msm_host->regs_restore.hc_38_3a =
3128 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
3129 msm_host->regs_restore.hc_34_36 =
3130 sdhci_readl(host, SDHCI_INT_ENABLE);
3131 msm_host->regs_restore.hc_28_2a =
3132 sdhci_readl(host, SDHCI_HOST_CONTROL);
3133 msm_host->regs_restore.vendor_caps_0 =
3134 readl_relaxed(host->ioaddr +
3135 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3136 msm_host->regs_restore.hc_caps_1 =
3137 sdhci_readl(host, SDHCI_CAPABILITIES_1);
3138 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
3139 msm_host_offset->CORE_TESTBUS_CONFIG);
3140 msm_host->regs_restore.is_valid = true;
3141
3142 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
3143 mmc_hostname(host->mmc), __func__,
3144 readl_relaxed(host->ioaddr +
3145 msm_host_offset->CORE_PWRCTL_MASK));
3146}
3147
3148static void sdhci_msm_registers_restore(struct sdhci_host *host)
3149{
3150 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3151 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003152 u8 irq_status;
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003153 const struct sdhci_msm_offset *msm_host_offset =
3154 msm_host->offset;
3155
3156 if (!msm_host->regs_restore.is_supported ||
3157 !msm_host->regs_restore.is_valid)
3158 return;
3159
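	/*
	 * Keep the power control IRQs masked while the registers are being
	 * restored; the saved PWRCTL mask is written back only after any
	 * stale PWRCTL status bits have been cleared below.
	 */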
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003160 writel_relaxed(0, host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003161 writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
3162 msm_host_offset->CORE_VENDOR_SPEC);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003163 writel_relaxed(msm_host->regs_restore.vendor_func2,
3164 host->ioaddr +
3165 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3166 writel_relaxed(msm_host->regs_restore.vendor_func3,
3167 host->ioaddr +
3168 msm_host_offset->CORE_VENDOR_SPEC3);
3169 sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
3170 SDHCI_CLOCK_CONTROL);
3171 sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
3172 SDHCI_AUTO_CMD_ERR);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003173 sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
3174 SDHCI_SIGNAL_ENABLE);
3175 sdhci_writel(host, msm_host->regs_restore.hc_34_36,
3176 SDHCI_INT_ENABLE);
3177 sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
3178 SDHCI_HOST_CONTROL);
3179 writel_relaxed(msm_host->regs_restore.vendor_caps_0,
3180 host->ioaddr +
3181 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3182 sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
3183 SDHCI_CAPABILITIES_1);
3184 writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
3185 msm_host_offset->CORE_TESTBUS_CONFIG);
3186 msm_host->regs_restore.is_valid = false;
3187
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003188 /*
3189 * Clear the PWRCTL_STATUS register.
3190 * There is a rare HW scenario where the first clear pulse could be
3191	 * lost when the actual reset and the clear/read of the status register
3192	 * happen at the same time. Hence, retry at least 10 times to make
3193	 * sure the status register is cleared. Otherwise, this will result in
3194	 * a spurious power IRQ and system instability.
3195 */
3196 irq_status = sdhci_msm_readb_relaxed(host,
3197 msm_host_offset->CORE_PWRCTL_STATUS);
3198
3199 sdhci_msm_clear_pwrctl_status(host, irq_status);
3200
3201 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
3202 host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
3203 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
3204 host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
3205
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003206 pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
3207 mmc_hostname(host->mmc), __func__,
3208 readl_relaxed(host->ioaddr +
3209 msm_host_offset->CORE_PWRCTL_MASK));
3210}
3211
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303212static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
3213{
3214 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3215 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3216 int rc = 0;
3217
3218 if (atomic_read(&msm_host->controller_clock))
3219 return 0;
3220
3221 sdhci_msm_bus_voting(host, 1);
3222
3223 if (!IS_ERR(msm_host->pclk)) {
3224 rc = clk_prepare_enable(msm_host->pclk);
3225 if (rc) {
3226 pr_err("%s: %s: failed to enable the pclk with error %d\n",
3227 mmc_hostname(host->mmc), __func__, rc);
3228 goto remove_vote;
3229 }
3230 }
3231
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303232 if (!IS_ERR(msm_host->bus_aggr_clk)) {
3233 rc = clk_prepare_enable(msm_host->bus_aggr_clk);
3234 if (rc) {
3235 pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
3236 mmc_hostname(host->mmc), __func__, rc);
3237 goto disable_pclk;
3238 }
3239 }
3240
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303241 rc = clk_prepare_enable(msm_host->clk);
3242 if (rc) {
3243 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
3244 mmc_hostname(host->mmc), __func__, rc);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303245 goto disable_bus_aggr_clk;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303246 }
3247
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303248 if (!IS_ERR(msm_host->ice_clk)) {
3249 rc = clk_prepare_enable(msm_host->ice_clk);
3250 if (rc) {
3251 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
3252 mmc_hostname(host->mmc), __func__, rc);
3253 goto disable_host_clk;
3254 }
3255 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303256 atomic_set(&msm_host->controller_clock, 1);
3257 pr_debug("%s: %s: enabled controller clock\n",
3258 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003259 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303260 goto out;
3261
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303262disable_host_clk:
3263 if (!IS_ERR(msm_host->clk))
3264 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303265disable_bus_aggr_clk:
3266 if (!IS_ERR(msm_host->bus_aggr_clk))
3267 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303268disable_pclk:
3269 if (!IS_ERR(msm_host->pclk))
3270 clk_disable_unprepare(msm_host->pclk);
3271remove_vote:
3272 if (msm_host->msm_bus_vote.client_handle)
3273 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3274out:
3275 return rc;
3276}
3277
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303278static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
3279{
3280 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3281 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303282
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303283 if (atomic_read(&msm_host->controller_clock)) {
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003284 sdhci_msm_registers_save(host);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303285 if (!IS_ERR(msm_host->clk))
3286 clk_disable_unprepare(msm_host->clk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303287 if (!IS_ERR(msm_host->ice_clk))
3288 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303289 if (!IS_ERR(msm_host->bus_aggr_clk))
3290 clk_disable_unprepare(msm_host->bus_aggr_clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303291 if (!IS_ERR(msm_host->pclk))
3292 clk_disable_unprepare(msm_host->pclk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303293 sdhci_msm_bus_voting(host, 0);
3294 atomic_set(&msm_host->controller_clock, 0);
3295 pr_debug("%s: %s: disabled controller clock\n",
3296 mmc_hostname(host->mmc), __func__);
3297 }
3298}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303299
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303300static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
3301{
3302 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3303 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3304 int rc = 0;
3305
3306 if (enable && !atomic_read(&msm_host->clks_on)) {
3307 pr_debug("%s: request to enable clocks\n",
3308 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303309
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303310 /*
3311 * The bus-width or the clock rate might have changed
3312	 * after the controller clocks are enabled; update the bus vote
3313	 * in that case.
3314 */
3315 if (atomic_read(&msm_host->controller_clock))
3316 sdhci_msm_bus_voting(host, 1);
3317
3318 rc = sdhci_msm_enable_controller_clock(host);
3319 if (rc)
3320 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303321
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303322 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3323 rc = clk_prepare_enable(msm_host->bus_clk);
3324 if (rc) {
3325 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
3326 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303327 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303328 }
3329 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003330 if (!IS_ERR(msm_host->ff_clk)) {
3331 rc = clk_prepare_enable(msm_host->ff_clk);
3332 if (rc) {
3333 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
3334 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303335 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003336 }
3337 }
3338 if (!IS_ERR(msm_host->sleep_clk)) {
3339 rc = clk_prepare_enable(msm_host->sleep_clk);
3340 if (rc) {
3341 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
3342 mmc_hostname(host->mmc), __func__, rc);
3343 goto disable_ff_clk;
3344 }
3345 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303346 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303347
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303348 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303349 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
3350 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05303351 /*
3352 * During 1.8V signal switching the clock source must
3353 * still be ON as it requires accessing SDHC
3354	 * registers (SDHCI HOST_CONTROL2 register bit 3 must
3355 * be written and polled after stopping the SDCLK).
3356 */
3357 if (host->mmc->card_clock_off)
3358 return 0;
3359 pr_debug("%s: request to disable clocks\n",
3360 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003361 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
3362 clk_disable_unprepare(msm_host->sleep_clk);
3363 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3364 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303365 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3366 clk_disable_unprepare(msm_host->bus_clk);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003367 sdhci_msm_disable_controller_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303368 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303369 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303370 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003371disable_ff_clk:
3372 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3373 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303374disable_bus_clk:
3375 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3376 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303377disable_controller_clk:
3378 if (!IS_ERR_OR_NULL(msm_host->clk))
3379 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303380 if (!IS_ERR(msm_host->ice_clk))
3381 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303382 if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
3383 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303384 if (!IS_ERR_OR_NULL(msm_host->pclk))
3385 clk_disable_unprepare(msm_host->pclk);
3386 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303387remove_vote:
3388 if (msm_host->msm_bus_vote.client_handle)
3389 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303390out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303391 return rc;
3392}
3393
3394static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3395{
3396 int rc;
3397 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3398 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303399 const struct sdhci_msm_offset *msm_host_offset =
3400 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003401 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303402 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003403 u32 sup_clock, ddr_clock, dll_lock;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303404 long bus_clk_rate;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303405 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303406
3407 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303408 /*
3409 * disable pwrsave to ensure clock is not auto-gated until
3410 * the rate is >400KHz (initialization complete).
3411 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303412 writel_relaxed(readl_relaxed(host->ioaddr +
3413 msm_host_offset->CORE_VENDOR_SPEC) &
3414 ~CORE_CLK_PWRSAVE, host->ioaddr +
3415 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303416 sdhci_msm_prepare_clocks(host, false);
3417 host->clock = clock;
3418 goto out;
3419 }
3420
3421 rc = sdhci_msm_prepare_clocks(host, true);
3422 if (rc)
3423 goto out;
3424
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303425 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3426 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303427 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003428 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303429 writel_relaxed(readl_relaxed(host->ioaddr +
3430 msm_host_offset->CORE_VENDOR_SPEC)
3431 | CORE_CLK_PWRSAVE, host->ioaddr +
3432 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303433 /*
3434	 * Disable pwrsave for a newly added card if it doesn't allow clock
3435 * gating.
3436 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003437 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303438 writel_relaxed(readl_relaxed(host->ioaddr +
3439 msm_host_offset->CORE_VENDOR_SPEC)
3440 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3441 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303442
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303443 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003444 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003445 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003446 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303447 /*
3448	 * The SDHC requires the internal clock frequency to be double the
3449	 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003450	 * uses the faster clock (100/400MHz) for some of its parts and
3451	 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303452 */
3453 ddr_clock = clock * 2;
3454 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3455 ddr_clock);
3456 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003457
3458 /*
3459 * In general all timing modes are controlled via UHS mode select in
3460	 * the Host Control2 register. The eMMC-specific HS200/HS400 timings don't
3461	 * have their respective modes defined here, hence we use these values.
3462 *
3463 * HS200 - SDR104 (Since they both are equivalent in functionality)
3464 * HS400 - This involves multiple configurations
3465 * Initially SDR104 - when tuning is required as HS200
3466 * Then when switching to DDR @ 400MHz (HS400) we use
3467 * the vendor specific HC_SELECT_IN to control the mode.
3468 *
3469 * In addition to controlling the modes we also need to select the
3470 * correct input clock for DLL depending on the mode.
3471 *
3472 * HS400 - divided clock (free running MCLK/2)
3473 * All other modes - default (free running MCLK)
3474 */
3475 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3476 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303477 writel_relaxed(((readl_relaxed(host->ioaddr +
3478 msm_host_offset->CORE_VENDOR_SPEC)
3479 & ~CORE_HC_MCLK_SEL_MASK)
3480 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3481 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003482 /*
3483 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3484 * register
3485 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303486 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003487 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303488 msm_host->enhanced_strobe)) &&
3489 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003490 /*
3491 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3492 * field in VENDOR_SPEC_FUNC
3493 */
3494 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303495 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003496 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303497 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3498 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003499 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003500 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3501 /*
3502 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3503	 * CORE_DLL_STATUS to be set. These should get set
3504	 * within 15 us at 200 MHz.
3505 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303506 rc = readl_poll_timeout(host->ioaddr +
3507 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003508 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3509 CORE_DDR_DLL_LOCK)), 10, 1000);
3510 if (rc == -ETIMEDOUT)
3511 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3512 mmc_hostname(host->mmc),
3513 dll_lock);
3514 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003515 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003516 if (!msm_host->use_cdclp533)
3517 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3518 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303519 msm_host_offset->CORE_VENDOR_SPEC3)
3520 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3521 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003522
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003523 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303524 writel_relaxed(((readl_relaxed(host->ioaddr +
3525 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003526 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303527 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3528 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003529
3530 /*
3531 * Disable HC_SELECT_IN to be able to use the UHS mode select
3532 * configuration from Host Control2 register for all other
3533 * modes.
3534 *
3535 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3536 * in VENDOR_SPEC_FUNC
3537 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303538 writel_relaxed((readl_relaxed(host->ioaddr +
3539 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003540 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303541 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3542 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003543 }
3544 mb();
3545
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303546 if (sup_clock != msm_host->clk_rate) {
3547 pr_debug("%s: %s: setting clk rate to %u\n",
3548 mmc_hostname(host->mmc), __func__, sup_clock);
3549 rc = clk_set_rate(msm_host->clk, sup_clock);
3550 if (rc) {
3551 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3552 mmc_hostname(host->mmc), __func__,
3553 sup_clock, rc);
3554 goto out;
3555 }
3556 msm_host->clk_rate = sup_clock;
3557 host->clock = clock;
Can Guob903ad82017-10-17 13:22:53 +08003558
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303559 if (!IS_ERR(msm_host->bus_aggr_clk) &&
3560 msm_host->pdata->bus_clk_cnt) {
3561 bus_clk_rate = sdhci_msm_get_bus_aggr_clk_rate(host,
3562 sup_clock);
3563 if (bus_clk_rate >= 0) {
3564 rc = clk_set_rate(msm_host->bus_aggr_clk,
3565 bus_clk_rate);
3566 if (rc) {
3567 pr_err("%s: %s: Failed to set rate %ld for bus-aggr-clk : %d\n",
3568 mmc_hostname(host->mmc),
3569 __func__, bus_clk_rate, rc);
3570 goto out;
3571 }
3572 } else {
3573 pr_err("%s: %s: Unsupported apps clk rate %u for bus-aggr-clk, err: %ld\n",
3574 mmc_hostname(host->mmc), __func__,
3575 sup_clock, bus_clk_rate);
3576 }
3577 }
3578
Can Guob903ad82017-10-17 13:22:53 +08003579		/*
3580		 * Configure pinctrl drive type according to current clock rate
3581		 */
3582 rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
3583 if (rc)
3584 pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
3585 mmc_hostname(host->mmc), __func__,
3586 clock, rc);
3587
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303588 /*
3589 * Update the bus vote in case of frequency change due to
3590 * clock scaling.
3591 */
3592 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303593 }
3594out:
3595 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303596}
3597
Sahitya Tummala14613432013-03-21 11:13:25 +05303598static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3599 unsigned int uhs)
3600{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003601 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3602 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303603 const struct sdhci_msm_offset *msm_host_offset =
3604 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303605 u16 ctrl_2;
3606
3607 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3608 /* Select Bus Speed Mode for host */
3609 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003610 if ((uhs == MMC_TIMING_MMC_HS400) ||
3611 (uhs == MMC_TIMING_MMC_HS200) ||
3612 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303613 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3614 else if (uhs == MMC_TIMING_UHS_SDR12)
3615 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3616 else if (uhs == MMC_TIMING_UHS_SDR25)
3617 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3618 else if (uhs == MMC_TIMING_UHS_SDR50)
3619 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003620 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3621 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303622 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303623 /*
3624	 * When the clock frequency is less than 100MHz, the feedback clock must be
3625	 * provided and the DLL must not be used so that tuning can be skipped. To
3626	 * provide the feedback clock, the mode selection can be any value less
3627	 * than 3'b011 in bits [2:0] of the HOST_CONTROL2 register.
3628 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003629 if (host->clock <= CORE_FREQ_100MHZ) {
3630 if ((uhs == MMC_TIMING_MMC_HS400) ||
3631 (uhs == MMC_TIMING_MMC_HS200) ||
3632 (uhs == MMC_TIMING_UHS_SDR104))
3633 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303634
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003635 /*
3636 * Make sure DLL is disabled when not required
3637 *
3638 * Write 1 to DLL_RST bit of DLL_CONFIG register
3639 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303640 writel_relaxed((readl_relaxed(host->ioaddr +
3641 msm_host_offset->CORE_DLL_CONFIG)
3642 | CORE_DLL_RST), host->ioaddr +
3643 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003644
3645 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303646 writel_relaxed((readl_relaxed(host->ioaddr +
3647 msm_host_offset->CORE_DLL_CONFIG)
3648 | CORE_DLL_PDN), host->ioaddr +
3649 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003650 mb();
3651
3652 /*
3653 * The DLL needs to be restored and CDCLP533 recalibrated
3654 * when the clock frequency is set back to 400MHz.
3655 */
3656 msm_host->calibration_done = false;
3657 }
3658
3659 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3660 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303661 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3662
3663}
3664
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003665#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003666#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303667static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003668{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303669 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303670 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3671 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303672 const struct sdhci_msm_offset *msm_host_offset =
3673 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303674 struct cmdq_host *cq_host = host->cq_host;
3675
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303676 u32 version = sdhci_msm_readl_relaxed(host,
3677 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003678 u16 minor = version & CORE_VERSION_TARGET_MASK;
3679 /* registers offset changed starting from 4.2.0 */
3680 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3681
Sayali Lokhande6e7e6d52017-01-04 12:00:35 +05303682 if (cq_host->offset_changed)
3683 offset += CQ_V5_VENDOR_CFG;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003684 pr_err("---- Debug RAM dump ----\n");
3685 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3686 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3687 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3688
3689 while (i < 16) {
3690 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3691 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3692 i++;
3693 }
3694 pr_err("-------------------------\n");
3695}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303696
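/*
 * Snapshot the mmc_host, mmc_card and sdhci_host structures into
 * msm_host->cached_data so that their state at dump time is preserved
 * alongside the vendor register dump below.
 */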
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303697static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3698{
3699 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3700 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3701 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3702
3703 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3704 sizeof(struct mmc_host));
3705 if (msm_host->mmc->card)
3706 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3707 sizeof(struct mmc_card));
3708 memcpy(&cached_data->copy_host, host,
3709 sizeof(struct sdhci_host));
3710}
3711
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303712void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3713{
3714 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3715 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303716 const struct sdhci_msm_offset *msm_host_offset =
3717 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303718 int tbsel, tbsel2;
3719 int i, index = 0;
3720 u32 test_bus_val = 0;
3721 u32 debug_reg[MAX_TEST_BUS] = {0};
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303722 u32 sts = 0;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303723
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303724 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303725 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003726 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303727 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003728
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303729 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3730 sdhci_msm_readl_relaxed(host,
3731 msm_host_offset->CORE_MCI_DATA_CNT),
3732 sdhci_msm_readl_relaxed(host,
3733 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303734 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303735 sdhci_msm_readl_relaxed(host,
3736 msm_host_offset->CORE_MCI_DATA_CNT),
3737 sdhci_msm_readl_relaxed(host,
3738 msm_host_offset->CORE_MCI_FIFO_CNT),
3739 sdhci_msm_readl_relaxed(host,
3740 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303741 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303742 readl_relaxed(host->ioaddr +
3743 msm_host_offset->CORE_DLL_CONFIG),
3744 readl_relaxed(host->ioaddr +
3745 msm_host_offset->CORE_DLL_STATUS),
3746 sdhci_msm_readl_relaxed(host,
3747 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303748 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303749 readl_relaxed(host->ioaddr +
3750 msm_host_offset->CORE_VENDOR_SPEC),
3751 readl_relaxed(host->ioaddr +
3752 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3753 readl_relaxed(host->ioaddr +
3754 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303755 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303756 readl_relaxed(host->ioaddr +
3757 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303758
3759 /*
3760 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3761 * of CORE_TESTBUS_CONFIG register.
3762 *
3763 * To select test bus 0 to 7 use tbsel and to select any test bus
3764	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3765 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3766 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3767 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003768 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303769 for (tbsel = 0; tbsel < 8; tbsel++) {
3770 if (index >= MAX_TEST_BUS)
3771 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303772 test_bus_val =
3773 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3774 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3775 sdhci_msm_writel_relaxed(test_bus_val, host,
3776 msm_host_offset->CORE_TESTBUS_CONFIG);
3777 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3778 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303779 }
3780 }
3781 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3782 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3783 i, i + 3, debug_reg[i], debug_reg[i+1],
3784 debug_reg[i+2], debug_reg[i+3]);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303785 if (host->is_crypto_en) {
3786 sdhci_msm_ice_get_status(host, &sts);
3787 pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
Venkat Gopalakrishnan6324ee62015-10-22 17:53:30 -07003788 sdhci_msm_ice_print_regs(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303789 }
3790}
3791
3792static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3793{
3794 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3795 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3796
3797 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303798 if (msm_host->ice.pdev) {
3799 if (msm_host->ice_hci_support)
3800 writel_relaxed(1, host->ioaddr +
3801 HC_VENDOR_SPECIFIC_ICE_CTRL);
3802 else
3803 writel_relaxed(1,
3804 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3805 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303806
3807 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003808}
3809
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303810/*
3811 * sdhci_msm_enhanced_strobe_mask :-
3812 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3813 * SW should write 3 to
3814 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3815 * The default reset value of this register is 2.
3816 */
3817static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3818{
3819 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3820 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303821 const struct sdhci_msm_offset *msm_host_offset =
3822 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303823
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303824 if (!msm_host->enhanced_strobe ||
3825 !mmc_card_strobe(msm_host->mmc->card)) {
3826 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303827 mmc_hostname(host->mmc));
3828 return;
3829 }
3830
3831 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303832 writel_relaxed((readl_relaxed(host->ioaddr +
3833 msm_host_offset->CORE_VENDOR_SPEC3)
3834 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3835 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303836 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303837 writel_relaxed((readl_relaxed(host->ioaddr +
3838 msm_host_offset->CORE_VENDOR_SPEC3)
3839 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3840 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303841 }
3842}
3843
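/*
 * Enable or disable the SDCC test bus (CORE_TESTBUS_CONFIG) that feeds the
 * debug register dump above.
 */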
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003844static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3845{
3846 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3847 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303848 const struct sdhci_msm_offset *msm_host_offset =
3849 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003850
3851 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303852 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3853 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003854 } else {
3855 u32 value;
3856
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303857 value = sdhci_msm_readl_relaxed(host,
3858 msm_host_offset->CORE_TESTBUS_CONFIG);
3859 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3860 sdhci_msm_writel_relaxed(value, host,
3861 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003862 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303863}
3864
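/*
 * When enabling, request a controller soft reset (HC_SW_RST_REQ) and poll
 * for it to clear; if it does not clear within ~100ms, set
 * HC_SW_RST_WAIT_IDLE_DIS so the reset does not wait for pending AXI
 * transfers. When disabling, clear the wait-idle-disable bit again.
 */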
Pavan Anamula691dd592015-08-25 16:11:20 +05303865void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3866{
3867 u32 vendor_func2;
3868 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303869 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3870 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3871 const struct sdhci_msm_offset *msm_host_offset =
3872 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303873
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303874 vendor_func2 = readl_relaxed(host->ioaddr +
3875 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303876
3877 if (enable) {
3878 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303879 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303880 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303881 while (readl_relaxed(host->ioaddr +
3882 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303883 if (timeout == 0) {
3884 pr_info("%s: Applying wait idle disable workaround\n",
3885 mmc_hostname(host->mmc));
3886 /*
3887 * Apply the reset workaround to not wait for
3888 * pending data transfers on AXI before
3889 * resetting the controller. This could be
3890 * risky if the transfers were stuck on the
3891 * AXI bus.
3892 */
3893 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303894 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303895 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303896 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3897 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303898 host->reset_wa_t = ktime_get();
3899 return;
3900 }
3901 timeout--;
3902 udelay(10);
3903 }
3904 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3905 mmc_hostname(host->mmc));
3906 } else {
3907 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303908 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303909 }
3910}
3911
Gilad Broner44445992015-09-29 16:05:39 +03003912static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3913{
3914 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303915 container_of(work, struct sdhci_msm_pm_qos_irq,
3916 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003917
3918 if (atomic_read(&pm_qos_irq->counter))
3919 return;
3920
3921 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3922 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3923}
3924
3925void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3926{
3927 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3928 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3929 struct sdhci_msm_pm_qos_latency *latency =
3930 &msm_host->pdata->pm_qos_data.irq_latency;
3931 int counter;
3932
3933 if (!msm_host->pm_qos_irq.enabled)
3934 return;
3935
3936 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3937 /* Make sure to update the voting in case power policy has changed */
3938 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3939 && counter > 1)
3940 return;
3941
Asutosh Das36c2e922015-12-01 12:19:58 +05303942 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003943 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3944 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3945 msm_host->pm_qos_irq.latency);
3946}
3947
3948void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3949{
3950 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3951 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3952 int counter;
3953
3954 if (!msm_host->pm_qos_irq.enabled)
3955 return;
3956
Subhash Jadavani4d813902015-10-15 12:16:43 -07003957 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3958 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3959 } else {
3960 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3961 return;
Gilad Broner44445992015-09-29 16:05:39 +03003962 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003963
Gilad Broner44445992015-09-29 16:05:39 +03003964 if (counter)
3965 return;
3966
3967 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05303968 queue_delayed_work(msm_host->pm_qos_wq,
3969 &msm_host->pm_qos_irq.unvote_work,
3970 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003971 return;
3972 }
3973
3974 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3975 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3976 msm_host->pm_qos_irq.latency);
3977}
3978
Gilad Broner68c54562015-09-20 11:59:46 +03003979static ssize_t
3980sdhci_msm_pm_qos_irq_show(struct device *dev,
3981 struct device_attribute *attr, char *buf)
3982{
3983 struct sdhci_host *host = dev_get_drvdata(dev);
3984 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3985 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3986 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3987
3988 return snprintf(buf, PAGE_SIZE,
3989 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3990 irq->enabled, atomic_read(&irq->counter), irq->latency);
3991}
3992
3993static ssize_t
3994sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3995 struct device_attribute *attr, char *buf)
3996{
3997 struct sdhci_host *host = dev_get_drvdata(dev);
3998 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3999 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4000
4001 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
4002}
4003
4004static ssize_t
4005sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
4006 struct device_attribute *attr, const char *buf, size_t count)
4007{
4008 struct sdhci_host *host = dev_get_drvdata(dev);
4009 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4010 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4011 uint32_t value;
4012 bool enable;
4013 int ret;
4014
4015 ret = kstrtou32(buf, 0, &value);
4016 if (ret)
4017 goto out;
4018 enable = !!value;
4019
4020 if (enable == msm_host->pm_qos_irq.enabled)
4021 goto out;
4022
4023 msm_host->pm_qos_irq.enabled = enable;
4024 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304025 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004026 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4027 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
4028 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4029 msm_host->pm_qos_irq.latency);
4030 }
4031
4032out:
4033 return count;
4034}
4035
Krishna Kondaf85e31a2015-10-23 11:43:02 -07004036#ifdef CONFIG_SMP
4037static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
4038 struct sdhci_host *host)
4039{
4040 msm_host->pm_qos_irq.req.irq = host->irq;
4041}
4042#else
4043static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
4044 struct sdhci_host *host) { }
4045#endif
4046
Vijay Viswanath1971d222018-03-01 12:01:47 +05304047static bool sdhci_msm_pm_qos_wq_init(struct sdhci_msm_host *msm_host)
4048{
4049 char *wq = NULL;
4050 bool ret = true;
4051
4052 wq = kasprintf(GFP_KERNEL, "sdhci_msm_pm_qos/%s",
4053 dev_name(&msm_host->pdev->dev));
4054 if (!wq)
4055 return false;
4056 /*
4057 * Create a work queue with flag WQ_MEM_RECLAIM set for
4058	 * pm_qos_unvote work. Because the mmc thread is created with
4059	 * the PF_MEMALLOC flag set, the kernel checks for the work queue
4060	 * flag WQ_MEM_RECLAIM when flushing the work queue. If the
4061	 * WQ_MEM_RECLAIM flag is not set, a kernel warning
4062	 * will be triggered.
4063 */
4064 msm_host->pm_qos_wq = create_workqueue(wq);
4065 if (!msm_host->pm_qos_wq) {
4066 ret = false;
4067 dev_err(&msm_host->pdev->dev,
4068 "failed to create pm qos unvote work queue\n");
4069 }
4070 kfree(wq);
4071 return ret;
4072}
4073
Gilad Broner44445992015-09-29 16:05:39 +03004074void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
4075{
4076 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4077 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4078 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03004079 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004080
4081 if (!msm_host->pdata->pm_qos_data.irq_valid)
4082 return;
4083
4084 /* Initialize only once as this gets called per partition */
4085 if (msm_host->pm_qos_irq.enabled)
4086 return;
4087
4088 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4089 msm_host->pm_qos_irq.req.type =
4090 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07004091 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
4092 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
4093 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03004094 else
4095 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
4096 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
4097
Vijay Viswanath1971d222018-03-01 12:01:47 +05304098 sdhci_msm_pm_qos_wq_init(msm_host);
4099
Asutosh Das36c2e922015-12-01 12:19:58 +05304100 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004101 sdhci_msm_pm_qos_irq_unvote_work);
4102 /* For initialization phase, set the performance latency */
4103 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
4104 msm_host->pm_qos_irq.latency =
4105 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
4106 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
4107 msm_host->pm_qos_irq.latency);
4108 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004109
4110 /* sysfs */
4111 msm_host->pm_qos_irq.enable_attr.show =
4112 sdhci_msm_pm_qos_irq_enable_show;
4113 msm_host->pm_qos_irq.enable_attr.store =
4114 sdhci_msm_pm_qos_irq_enable_store;
4115 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
4116 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
4117 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
4118 ret = device_create_file(&msm_host->pdev->dev,
4119 &msm_host->pm_qos_irq.enable_attr);
4120 if (ret)
4121 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
4122 __func__, ret);
4123
4124 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
4125 msm_host->pm_qos_irq.status_attr.store = NULL;
4126 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
4127 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
4128 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
4129 ret = device_create_file(&msm_host->pdev->dev,
4130 &msm_host->pm_qos_irq.status_attr);
4131 if (ret)
4132 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
4133 __func__, ret);
4134}
4135
4136static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
4137 struct device_attribute *attr, char *buf)
4138{
4139 struct sdhci_host *host = dev_get_drvdata(dev);
4140 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4141 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4142 struct sdhci_msm_pm_qos_group *group;
4143 int i;
4144 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4145 int offset = 0;
4146
4147 for (i = 0; i < nr_groups; i++) {
4148 group = &msm_host->pm_qos[i];
4149 offset += snprintf(&buf[offset], PAGE_SIZE,
4150 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
4151 i, group->req.cpus_affine.bits[0],
4152 msm_host->pm_qos_group_enable,
4153 atomic_read(&group->counter),
4154 group->latency);
4155 }
4156
4157 return offset;
4158}
4159
4160static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
4161 struct device_attribute *attr, char *buf)
4162{
4163 struct sdhci_host *host = dev_get_drvdata(dev);
4164 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4165 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4166
4167 return snprintf(buf, PAGE_SIZE, "%s\n",
4168 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
4169}
4170
4171static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
4172 struct device_attribute *attr, const char *buf, size_t count)
4173{
4174 struct sdhci_host *host = dev_get_drvdata(dev);
4175 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4176 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4177 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4178 uint32_t value;
4179 bool enable;
4180 int ret;
4181 int i;
4182
4183 ret = kstrtou32(buf, 0, &value);
4184 if (ret)
4185 goto out;
4186 enable = !!value;
4187
4188 if (enable == msm_host->pm_qos_group_enable)
4189 goto out;
4190
4191 msm_host->pm_qos_group_enable = enable;
4192 if (!enable) {
4193 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304194 cancel_delayed_work_sync(
4195 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004196 atomic_set(&msm_host->pm_qos[i].counter, 0);
4197 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
4198 pm_qos_update_request(&msm_host->pm_qos[i].req,
4199 msm_host->pm_qos[i].latency);
4200 }
4201 }
4202
4203out:
4204 return count;
Gilad Broner44445992015-09-29 16:05:39 +03004205}
4206
4207static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
4208{
4209 int i;
4210 struct sdhci_msm_cpu_group_map *map =
4211 &msm_host->pdata->pm_qos_data.cpu_group_map;
4212
4213 if (cpu < 0)
4214 goto not_found;
4215
4216 for (i = 0; i < map->nr_groups; i++)
4217 if (cpumask_test_cpu(cpu, &map->mask[i]))
4218 return i;
4219
4220not_found:
4221 return -EINVAL;
4222}
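/*
 * Illustrative example (added for clarity, not in the original source):
 * with cpu_group_map.nr_groups == 2 and group masks 0x0f and 0xf0,
 * sdhci_msm_get_cpu_group() returns 0 for cpu 2, 1 for cpu 5, and
 * -EINVAL for a negative cpu or a cpu not covered by any mask.
 */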
4223
4224void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
4225 struct sdhci_msm_pm_qos_latency *latency, int cpu)
4226{
4227 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4228 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4229 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4230 struct sdhci_msm_pm_qos_group *pm_qos_group;
4231 int counter;
4232
4233 if (!msm_host->pm_qos_group_enable || group < 0)
4234 return;
4235
4236 pm_qos_group = &msm_host->pm_qos[group];
4237 counter = atomic_inc_return(&pm_qos_group->counter);
4238
4239 /* Make sure to update the voting in case power policy has changed */
4240 if (pm_qos_group->latency == latency->latency[host->power_policy]
4241 && counter > 1)
4242 return;
4243
Asutosh Das36c2e922015-12-01 12:19:58 +05304244 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03004245
4246 pm_qos_group->latency = latency->latency[host->power_policy];
4247 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
4248}
4249
4250static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
4251{
4252 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05304253 container_of(work, struct sdhci_msm_pm_qos_group,
4254 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03004255
4256 if (atomic_read(&group->counter))
4257 return;
4258
4259 group->latency = PM_QOS_DEFAULT_VALUE;
4260 pm_qos_update_request(&group->req, group->latency);
4261}
4262
Gilad Broner07d92eb2015-09-29 16:57:21 +03004263bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03004264{
4265 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4266 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4267 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4268
4269 if (!msm_host->pm_qos_group_enable || group < 0 ||
4270 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03004271 return false;
Gilad Broner44445992015-09-29 16:05:39 +03004272
4273 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05304274 queue_delayed_work(msm_host->pm_qos_wq,
4275 &msm_host->pm_qos[group].unvote_work,
4276 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03004277 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004278 }
4279
4280 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
4281 pm_qos_update_request(&msm_host->pm_qos[group].req,
4282 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03004283 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004284}
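/*
 * Editorial note (not part of the original driver): when the group counter
 * drops to zero, the async path defers the actual QoS release by queueing
 * unvote_work on pm_qos_wq with a QOS_REMOVE_DELAY_MS delay, so a request
 * that follows immediately can cancel it and keep the low-latency vote;
 * the synchronous path restores PM_QOS_DEFAULT_VALUE right away.
 */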
4285
4286void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
4287 struct sdhci_msm_pm_qos_latency *latency)
4288{
4289 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4290 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4291 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4292 struct sdhci_msm_pm_qos_group *group;
4293 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03004294 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004295
4296 if (msm_host->pm_qos_group_enable)
4297 return;
4298
4299 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
4300 GFP_KERNEL);
4301 if (!msm_host->pm_qos)
4302 return;
4303
4304 for (i = 0; i < nr_groups; i++) {
4305 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05304306 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004307 sdhci_msm_pm_qos_cpu_unvote_work);
4308 atomic_set(&group->counter, 0);
4309 group->req.type = PM_QOS_REQ_AFFINE_CORES;
4310 cpumask_copy(&group->req.cpus_affine,
4311 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
Ritesh Harjanib41e0572017-03-28 13:19:26 +05304312 /* We set default latency here for all pm_qos cpu groups. */
4313 group->latency = PM_QOS_DEFAULT_VALUE;
Gilad Broner44445992015-09-29 16:05:39 +03004314 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
4315 group->latency);
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304316 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
Gilad Broner44445992015-09-29 16:05:39 +03004317 __func__, i,
4318 group->req.cpus_affine.bits[0],
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304319 group->latency);
Gilad Broner44445992015-09-29 16:05:39 +03004320 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03004321 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03004322 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004323
4324 /* sysfs */
4325 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
4326 msm_host->pm_qos_group_status_attr.store = NULL;
4327 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
4328 msm_host->pm_qos_group_status_attr.attr.name =
4329 "pm_qos_cpu_groups_status";
4330 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
4331 ret = device_create_file(&msm_host->pdev->dev,
4332 &msm_host->pm_qos_group_status_attr);
4333 if (ret)
4334 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
4335 __func__, ret);
4336 msm_host->pm_qos_group_enable_attr.show =
4337 sdhci_msm_pm_qos_group_enable_show;
4338 msm_host->pm_qos_group_enable_attr.store =
4339 sdhci_msm_pm_qos_group_enable_store;
4340 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
4341 msm_host->pm_qos_group_enable_attr.attr.name =
4342 "pm_qos_cpu_groups_enable";
 4343 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO | S_IWUSR;
4344 ret = device_create_file(&msm_host->pdev->dev,
4345 &msm_host->pm_qos_group_enable_attr);
4346 if (ret)
4347 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
4348 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03004349}
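/*
 * Editorial note (illustrative only, device path hypothetical): group voting
 * can be inspected from user space via
 *   cat /sys/devices/platform/<sdhc-device>/pm_qos_cpu_groups_status
 * and toggled through pm_qos_cpu_groups_enable, whose store handler
 * (sdhci_msm_pm_qos_group_enable_store() above) cancels pending unvote work
 * and parks every group at PM_QOS_DEFAULT_VALUE when disabling.
 */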
4350
Gilad Broner07d92eb2015-09-29 16:57:21 +03004351static void sdhci_msm_pre_req(struct sdhci_host *host,
4352 struct mmc_request *mmc_req)
4353{
4354 int cpu;
4355 int group;
4356 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4357 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4358 int prev_group = sdhci_msm_get_cpu_group(msm_host,
4359 msm_host->pm_qos_prev_cpu);
4360
4361 sdhci_msm_pm_qos_irq_vote(host);
4362
4363 cpu = get_cpu();
4364 put_cpu();
4365 group = sdhci_msm_get_cpu_group(msm_host, cpu);
4366 if (group < 0)
4367 return;
4368
4369 if (group != prev_group && prev_group >= 0) {
4370 sdhci_msm_pm_qos_cpu_unvote(host,
4371 msm_host->pm_qos_prev_cpu, false);
4372 prev_group = -1; /* make sure to vote for new group */
4373 }
4374
4375 if (prev_group < 0) {
4376 sdhci_msm_pm_qos_cpu_vote(host,
4377 msm_host->pdata->pm_qos_data.latency, cpu);
4378 msm_host->pm_qos_prev_cpu = cpu;
4379 }
4380}
4381
4382static void sdhci_msm_post_req(struct sdhci_host *host,
4383 struct mmc_request *mmc_req)
4384{
4385 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4386 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4387
4388 sdhci_msm_pm_qos_irq_unvote(host, false);
4389
4390 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4391 msm_host->pm_qos_prev_cpu = -1;
4392}
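/*
 * Editorial summary (not part of the original driver): sdhci_msm_pre_req()
 * votes the IRQ PM QoS and the CPU-group QoS for the CPU issuing the request
 * and records it in pm_qos_prev_cpu; if a later request comes from a CPU in a
 * different group, the previous group is unvoted before the new one is voted.
 * sdhci_msm_post_req() drops both votes once the request completes.
 */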
4393
4394static void sdhci_msm_init(struct sdhci_host *host)
4395{
4396 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4397 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4398
4399 sdhci_msm_pm_qos_irq_init(host);
4400
4401 if (msm_host->pdata->pm_qos_data.legacy_valid)
4402 sdhci_msm_pm_qos_cpu_init(host,
4403 msm_host->pdata->pm_qos_data.latency);
4404}
4405
Sahitya Tummala9150a942014-10-31 15:33:04 +05304406static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4407{
4408 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4409 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4410 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4411 u32 max_curr = 0;
4412
4413 if (curr_slot && curr_slot->vdd_data)
4414 max_curr = curr_slot->vdd_data->hpm_uA;
4415
4416 return max_curr;
4417}
4418
Sahitya Tummala073ca552015-08-06 13:59:37 +05304419static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
4420{
4421 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4422 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4423 int ret = 0;
4424 u32 clk_rate = 0;
4425
4426 if (!IS_ERR(msm_host->ice_clk)) {
4427 clk_rate = (state == MMC_LOAD_LOW) ?
4428 msm_host->pdata->ice_clk_min :
4429 msm_host->pdata->ice_clk_max;
4430 if (msm_host->ice_clk_rate == clk_rate)
4431 return 0;
4432 pr_debug("%s: changing ICE clk rate to %u\n",
4433 mmc_hostname(host->mmc), clk_rate);
4434 ret = clk_set_rate(msm_host->ice_clk, clk_rate);
4435 if (ret) {
4436 pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
4437 mmc_hostname(host->mmc), ret, clk_rate);
4438 return ret;
4439 }
4440 msm_host->ice_clk_rate = clk_rate;
4441 }
4442 return 0;
4443}
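/*
 * Worked example (editorial, values hypothetical): with an ICE core clock
 * present and platform data of ice_clk_min = 75 MHz / ice_clk_max = 300 MHz,
 * MMC_LOAD_LOW scales the ICE clock down to 75 MHz and any other load state
 * selects 300 MHz; the call is a no-op when the requested rate already
 * equals ice_clk_rate.
 */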
4444
Asutosh Das0ef24812012-12-18 16:14:02 +05304445static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304446 .crypto_engine_cfg = sdhci_msm_ice_cfg,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304447 .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
Veerabhadrarao Badiganti6c6b97a2017-03-08 06:51:49 +05304448 .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304449 .crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304450 .crypto_engine_reset = sdhci_msm_ice_reset,
Sahitya Tummala14613432013-03-21 11:13:25 +05304451 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05304452 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004453 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05304454 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004455 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05304456 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304457 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304458 .get_min_clock = sdhci_msm_get_min_clock,
4459 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05304460 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304461 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304462 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08004463 .set_bus_width = sdhci_set_bus_width,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304464 .reset = sdhci_msm_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004465 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05304466 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05304467 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03004468 .init = sdhci_msm_init,
4469 .pre_req = sdhci_msm_pre_req,
4470 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05304471 .get_current_limit = sdhci_msm_get_current_limit,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304472 .notify_load = sdhci_msm_notify_load,
Asutosh Das0ef24812012-12-18 16:14:02 +05304473};
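/*
 * Editorial note: this vendor ops table is attached to the platform data in
 * sdhci_msm_probe() (msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops), so the
 * core sdhci/cmdq layers call back into the MSM-specific clocking, tuning,
 * PM QoS and ICE crypto hooks listed above.
 */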
4474
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304475static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
4476 struct sdhci_host *host)
4477{
Krishna Konda46fd1432014-10-30 21:13:27 -07004478 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304479 u16 minor;
4480 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304481 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304482 const struct sdhci_msm_offset *msm_host_offset =
4483 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304484
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304485 version = sdhci_msm_readl_relaxed(host,
4486 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304487 major = (version & CORE_VERSION_MAJOR_MASK) >>
4488 CORE_VERSION_MAJOR_SHIFT;
4489 minor = version & CORE_VERSION_TARGET_MASK;
4490
Krishna Konda46fd1432014-10-30 21:13:27 -07004491 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
4492
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304493 /*
4494 * Starting with SDCC 5 controller (core major version = 1)
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004495 * the controller won't advertise 3.0V, 1.8V and 8-bit bus width
 4496 * support, except on some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304497 */
4498 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004499 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004500 /*
4501 * Enable 1.8V support capability on controllers that
4502 * support dual voltage
4503 */
4504 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07004505 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
4506 caps |= CORE_3_0V_SUPPORT;
4507 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004508 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05304509 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
4510 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304511 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004512
4513 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304514 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
 4515 * on 8992 (minor 0x3e) as a reset workaround for the data stuck issue.
4516 */
4517 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304518 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304519 val = readl_relaxed(host->ioaddr +
4520 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304521 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304522 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304523 }
4524 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004525 * SDCC 5 controller with major version 1, minor version 0x34 and later
4526 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
4527 */
4528 if ((major == 1) && (minor < 0x34))
4529 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004530
4531 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004532 * SDCC 5 controller with major version 1, minor version 0x42 and later
4533 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304534 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004535 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304536 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004537 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304538 msm_host->enhanced_strobe = true;
4539 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004540
4541 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004542 * SDCC 5 controller with major version 1 and minor version 0x42,
 4543 * 0x46 and 0x49 currently use the 14lpp tech DLL, whose internal
 4544 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304545 * when MCLK is gated OFF, it is not gated for less than 0.5us
 4546 * and MCLK must be switched on for at least 1us before DATA
4547 * starts coming.
4548 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004549 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
Veerabhadrarao Badiganti06d2c8c2017-09-12 17:24:09 +05304550 (minor == 0x49) || (minor >= 0x6b)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304551 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004552
Pavan Anamula5a256df2015-10-16 14:38:28 +05304553 /* Fake 3.0V support for SDIO devices which require such a voltage */
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05304554 if (msm_host->core_3_0v_support) {
Pavan Anamula5a256df2015-10-16 14:38:28 +05304555 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304556 writel_relaxed((readl_relaxed(host->ioaddr +
4557 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4558 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304559 }
4560
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004561 if ((major == 1) && (minor >= 0x49))
4562 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304563 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004564 * Mask 64-bit support for controller with 32-bit address bus so that
 4565 * a smaller descriptor size will be used, improving memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004566 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004567 if (!msm_host->pdata->largeaddressbus)
4568 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4569
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304570 writel_relaxed(caps, host->ioaddr +
4571 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004572 /* keep track of the value in SDHCI_CAPABILITIES */
4573 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304574
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304575 if ((major == 1) && (minor >= 0x6b)) {
Ritesh Harjani82124772014-11-04 15:34:00 +05304576 msm_host->ice_hci_support = true;
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304577 host->cdr_support = true;
4578 }
Veerabhadrarao Badigantib8f2b0c2018-03-14 15:21:05 +05304579
4580 if ((major == 1) && (minor >= 0x71))
4581 msm_host->need_dll_user_ctl = true;
4582
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304583}
4584
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004585#ifdef CONFIG_MMC_CQ_HCI
4586static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4587 struct platform_device *pdev)
4588{
4589 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4590 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4591
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304592 if (nocmdq) {
4593 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4594 return;
4595 }
4596
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004597 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004598 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004599 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4600 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004601 host->cq_host = NULL;
4602 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004603 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004604 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004605}
4606#else
4607static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4608 struct platform_device *pdev)
4609{
4610
4611}
4612#endif
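/*
 * Editorial note: the command-queueing glue above is only built with
 * CONFIG_MMC_CQ_HCI, and sdhci_msm_cmdq_init() additionally honours the
 * "nocmdq" flag (defined earlier in this file); judging by the dev_dbg
 * message it is intended to be set from the kernel command line to keep
 * CMDQ disabled even when the config option is enabled.
 */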
4613
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004614static bool sdhci_msm_is_bootdevice(struct device *dev)
4615{
4616 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4617 strlen(saved_command_line))) {
4618 char search_string[50];
4619
4620 snprintf(search_string, ARRAY_SIZE(search_string),
4621 "androidboot.bootdevice=%s", dev_name(dev));
4622 if (strnstr(saved_command_line, search_string,
4623 strlen(saved_command_line)))
4624 return true;
4625 else
4626 return false;
4627 }
4628
4629 /*
 4630 * If the "androidboot.bootdevice=" argument is not present, then
 4631 * return true as we don't know the boot device anyway.
4632 */
4633 return true;
4634}
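/*
 * Illustrative example (hypothetical device name): with
 * "androidboot.bootdevice=7824900.sdhci" on the kernel command line, only
 * the controller whose dev_name() is "7824900.sdhci" is treated as the boot
 * device; if the argument is absent altogether, the function returns true
 * for every controller.
 */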
4635
Asutosh Das0ef24812012-12-18 16:14:02 +05304636static int sdhci_msm_probe(struct platform_device *pdev)
4637{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304638 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304639 struct sdhci_host *host;
4640 struct sdhci_pltfm_host *pltfm_host;
4641 struct sdhci_msm_host *msm_host;
4642 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004643 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004644 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004645 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304646 struct resource *tlmm_memres = NULL;
4647 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304648 unsigned long flags;
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004649 bool force_probe;
Asutosh Das0ef24812012-12-18 16:14:02 +05304650
4651 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4652 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4653 GFP_KERNEL);
4654 if (!msm_host) {
4655 ret = -ENOMEM;
4656 goto out;
4657 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304658
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304659 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4660 msm_host->mci_removed = true;
4661 msm_host->offset = &sdhci_msm_offset_mci_removed;
4662 } else {
4663 msm_host->mci_removed = false;
4664 msm_host->offset = &sdhci_msm_offset_mci_present;
4665 }
4666 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304667 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4668 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4669 if (IS_ERR(host)) {
4670 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304671 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304672 }
4673
4674 pltfm_host = sdhci_priv(host);
4675 pltfm_host->priv = msm_host;
4676 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304677 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304678
Asutosh Das1c43b132018-01-11 18:08:40 +05304679 ret = sdhci_msm_get_socrev(&pdev->dev, msm_host);
4680 if (ret == -EPROBE_DEFER) {
4681 dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
4682 goto pltfm_free;
4683 }
4684
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304685 /* get the ice device vops if present */
4686 ret = sdhci_msm_ice_get_dev(host);
4687 if (ret == -EPROBE_DEFER) {
4688 /*
 4689 * The SDHCI driver might be probed before the ICE driver is.
 4690 * In that case we would like to return -EPROBE_DEFER
 4691 * in order to delay its probing.
4692 */
4693 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4694 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004695 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304696
4697 } else if (ret == -ENODEV) {
4698 /*
 4699 * The ICE device is not enabled in the DTS file. No further
 4700 * initialization of the ICE driver is needed.
4701 */
4702 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4703 __func__);
4704 } else if (ret) {
4705 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4706 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004707 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304708 }
4709
Asutosh Das0ef24812012-12-18 16:14:02 +05304710 /* Extract platform data */
4711 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004712 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304713 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004714 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4715 ret);
4716 goto pltfm_free;
4717 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004718
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004719 /* Read property to determine if the probe is forced */
4720 force_probe = of_find_property(pdev->dev.of_node,
4721 "qcom,force-sdhc1-probe", NULL);
4722
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004723 /* skip the probe if eMMC isn't a boot device */
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004724 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
4725 && !force_probe) {
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004726 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004727 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004728 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004729
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004730 if (disable_slots & (1 << (ret - 1))) {
4731 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4732 ret);
4733 ret = -ENODEV;
4734 goto pltfm_free;
4735 }
4736
Sayali Lokhande5f768322016-04-11 18:36:53 +05304737 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004738 sdhci_slot[ret-1] = msm_host;
4739
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004740 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4741 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304742 if (!msm_host->pdata) {
4743 dev_err(&pdev->dev, "DT parsing error\n");
4744 goto pltfm_free;
4745 }
4746 } else {
4747 dev_err(&pdev->dev, "No device tree node\n");
4748 goto pltfm_free;
4749 }
4750
4751 /* Setup Clocks */
4752
4753 /* Setup SDCC bus voter clock. */
4754 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4755 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4756 /* Vote for max. clk rate for max. performance */
4757 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4758 if (ret)
4759 goto pltfm_free;
4760 ret = clk_prepare_enable(msm_host->bus_clk);
4761 if (ret)
4762 goto pltfm_free;
4763 }
4764
4765 /* Setup main peripheral bus clock */
4766 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4767 if (!IS_ERR(msm_host->pclk)) {
4768 ret = clk_prepare_enable(msm_host->pclk);
4769 if (ret)
4770 goto bus_clk_disable;
4771 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304772 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304773
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304774 /* Setup SDC bus aggregate clock */
4775 msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
4776 if (!IS_ERR(msm_host->bus_aggr_clk)) {
4777 ret = clk_prepare_enable(msm_host->bus_aggr_clk);
4778 if (ret) {
4779 dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
4780 goto pclk_disable;
4781 }
4782 }
4783
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304784 if (msm_host->ice.pdev) {
4785 /* Setup SDC ICE clock */
4786 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4787 if (!IS_ERR(msm_host->ice_clk)) {
4788 /* ICE core has only one clock frequency for now */
4789 ret = clk_set_rate(msm_host->ice_clk,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304790 msm_host->pdata->ice_clk_max);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304791 if (ret) {
4792 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4793 ret,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304794 msm_host->pdata->ice_clk_max);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304795 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304796 }
4797 ret = clk_prepare_enable(msm_host->ice_clk);
4798 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304799 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304800
4801 msm_host->ice_clk_rate =
Sahitya Tummala073ca552015-08-06 13:59:37 +05304802 msm_host->pdata->ice_clk_max;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304803 }
4804 }
4805
Asutosh Das0ef24812012-12-18 16:14:02 +05304806 /* Setup SDC MMC clock */
4807 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4808 if (IS_ERR(msm_host->clk)) {
4809 ret = PTR_ERR(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304810 goto bus_aggr_clk_disable;
Asutosh Das0ef24812012-12-18 16:14:02 +05304811 }
4812
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304813 /* Set to the minimum supported clock frequency */
4814 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4815 if (ret) {
4816 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304817 goto bus_aggr_clk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304818 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304819 ret = clk_prepare_enable(msm_host->clk);
4820 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304821 goto bus_aggr_clk_disable;
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304822
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304823 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304824 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304825
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004826 /* Setup CDC calibration fixed feedback clock */
4827 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4828 if (!IS_ERR(msm_host->ff_clk)) {
4829 ret = clk_prepare_enable(msm_host->ff_clk);
4830 if (ret)
4831 goto clk_disable;
4832 }
4833
4834 /* Setup CDC calibration sleep clock */
4835 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4836 if (!IS_ERR(msm_host->sleep_clk)) {
4837 ret = clk_prepare_enable(msm_host->sleep_clk);
4838 if (ret)
4839 goto ff_clk_disable;
4840 }
4841
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004842 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4843
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304844 ret = sdhci_msm_bus_register(msm_host, pdev);
4845 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004846 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304847
4848 if (msm_host->msm_bus_vote.client_handle)
4849 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4850 sdhci_msm_bus_work);
4851 sdhci_msm_bus_voting(host, 1);
4852
Asutosh Das0ef24812012-12-18 16:14:02 +05304853 /* Setup regulators */
4854 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4855 if (ret) {
4856 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304857 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304858 }
4859
4860 /* Reset the core and Enable SDHC mode */
4861 core_memres = platform_get_resource_byname(pdev,
4862 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304863 if (!msm_host->mci_removed) {
4864 if (!core_memres) {
4865 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4866 goto vreg_deinit;
4867 }
4868 msm_host->core_mem = devm_ioremap(&pdev->dev,
4869 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304870
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304871 if (!msm_host->core_mem) {
4872 dev_err(&pdev->dev, "Failed to remap registers\n");
4873 ret = -ENOMEM;
4874 goto vreg_deinit;
4875 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304876 }
4877
Sahitya Tummala079ed852015-10-29 20:18:45 +05304878 tlmm_memres = platform_get_resource_byname(pdev,
4879 IORESOURCE_MEM, "tlmm_mem");
4880 if (tlmm_memres) {
4881 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4882 resource_size(tlmm_memres));
4883
4884 if (!tlmm_mem) {
4885 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4886 ret = -ENOMEM;
4887 goto vreg_deinit;
4888 }
4889 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
Sahitya Tummala079ed852015-10-29 20:18:45 +05304890 }
4891
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304892 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004893 * Reset the vendor spec register to its power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304894 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004895 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304896 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304897
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +05304898 /*
4899 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
4900 */
4901 writel_relaxed((readl_relaxed(host->ioaddr +
4902 msm_host_offset->CORE_VENDOR_SPEC3) &
4903 ~CORE_FIFO_ALT_EN), host->ioaddr +
4904 msm_host_offset->CORE_VENDOR_SPEC3);
4905
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304906 if (!msm_host->mci_removed) {
4907 /* Set HC_MODE_EN bit in HC_MODE register */
4908 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304909
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304910 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4911 writel_relaxed(readl_relaxed(msm_host->core_mem +
4912 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4913 msm_host->core_mem + CORE_HC_MODE);
4914 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304915 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004916
4917 /*
 4918 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4919 * be used as required later on.
4920 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304921 writel_relaxed((readl_relaxed(host->ioaddr +
4922 msm_host_offset->CORE_VENDOR_SPEC) |
4923 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4924 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304925 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304926 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4927 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4928 * interrupt in GIC (by registering the interrupt handler), we need to
 4929 * ensure that any pending power irq interrupt status is acknowledged;
 4930 * otherwise the power irq interrupt handler would fire prematurely.
4931 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304932 irq_status = sdhci_msm_readl_relaxed(host,
4933 msm_host_offset->CORE_PWRCTL_STATUS);
4934 sdhci_msm_writel_relaxed(irq_status, host,
4935 msm_host_offset->CORE_PWRCTL_CLEAR);
4936 irq_ctl = sdhci_msm_readl_relaxed(host,
4937 msm_host_offset->CORE_PWRCTL_CTL);
4938
Subhash Jadavani28137342013-05-14 17:46:43 +05304939 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4940 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4941 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4942 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304943 sdhci_msm_writel_relaxed(irq_ctl, host,
4944 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004945
Subhash Jadavani28137342013-05-14 17:46:43 +05304946 /*
 4947 * Ensure that the above writes are propagated before interrupt enablement
4948 * in GIC.
4949 */
4950 mb();
4951
4952 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304953 * Following are the deviations from SDHC spec v3.0 -
 4954 * 1. Card detection is handled using a separate GPIO.
 4955 * 2. Bus power control is handled by interacting with the PMIC.
4956 */
4957 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4958 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304959 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004960 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304961 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304962 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304963 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304964 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304965 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304966 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304967
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304968 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4969 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4970
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004971 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004972 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4973 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4974 SDHCI_VENDOR_VER_SHIFT));
4975 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4976 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4977 /*
4978 * Add 40us delay in interrupt handler when
4979 * operating at initialization frequency(400KHz).
4980 */
4981 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4982 /*
4983 * Set Software Reset for DAT line in Software
4984 * Reset Register (Bit 2).
4985 */
4986 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4987 }
4988
Asutosh Das214b9662013-06-13 14:27:42 +05304989 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4990
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004991 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004992 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4993 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304994 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004995 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304996 goto vreg_deinit;
4997 }
Subhash Jadavanide139e82017-09-27 11:04:40 +05304998
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004999 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05305000 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07005001 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305002 if (ret) {
5003 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005004 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05305005 goto vreg_deinit;
5006 }
5007
5008 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305009 sdhci_msm_writel_relaxed(INT_MASK, host,
5010 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05305011
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305012#ifdef CONFIG_MMC_CLKGATE
5013 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
5014 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
5015#endif
5016
Asutosh Das0ef24812012-12-18 16:14:02 +05305017 /* Set host capabilities */
5018 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
5019 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005020 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05305021 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05305022 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005023 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005024 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03005025 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05305026 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07005027 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03005028 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305029 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05305030
5031 if (msm_host->pdata->nonremovable)
5032 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
5033
Guoping Yuf7c91332014-08-20 16:56:18 +08005034 if (msm_host->pdata->nonhotplug)
5035 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
5036
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07005037 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
5038
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305039 /* Initialize ICE if present */
5040 if (msm_host->ice.pdev) {
5041 ret = sdhci_msm_ice_init(host);
5042 if (ret) {
5043 dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
5044 mmc_hostname(host->mmc), ret);
5045 ret = -EINVAL;
5046 goto vreg_deinit;
5047 }
5048 host->is_crypto_en = true;
5049 /* Packed commands cannot be encrypted/decrypted using ICE */
5050 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
5051 MMC_CAP2_PACKED_WR_CONTROL);
5052 }
5053
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05305054 init_completion(&msm_host->pwr_irq_completion);
5055
Sahitya Tummala581df132013-03-12 14:57:46 +05305056 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05305057 /*
5058 * Set up the card detect GPIO in active configuration before
5059 * configuring it as an IRQ. Otherwise, it can be in some
 5060 * weird/inconsistent state, resulting in a flood of interrupts.
5061 */
5062 sdhci_msm_setup_pins(msm_host->pdata, true);
5063
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05305064 /*
5065 * This delay is needed for stabilizing the card detect GPIO
5066 * line after changing the pull configs.
5067 */
5068 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05305069 ret = mmc_gpio_request_cd(msm_host->mmc,
5070 msm_host->pdata->status_gpio, 0);
5071 if (ret) {
5072 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
5073 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305074 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05305075 }
5076 }
5077
Krishna Konda7feab352013-09-17 23:55:40 -07005078 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
5079 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
5080 host->dma_mask = DMA_BIT_MASK(64);
5081 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305082 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07005083 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305084 host->dma_mask = DMA_BIT_MASK(32);
5085 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305086 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305087 } else {
5088 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
5089 }
5090
Ritesh Harjani42876f42015-11-17 17:46:51 +05305091 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
5092 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05305093 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305094 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
5095 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05305096 msm_host->is_sdiowakeup_enabled = true;
5097 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
5098 sdhci_msm_sdiowakeup_irq,
5099 IRQF_SHARED | IRQF_TRIGGER_HIGH,
5100 "sdhci-msm sdiowakeup", host);
5101 if (ret) {
5102 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
5103 __func__, msm_host->pdata->sdiowakeup_irq, ret);
5104 msm_host->pdata->sdiowakeup_irq = -1;
5105 msm_host->is_sdiowakeup_enabled = false;
5106 goto vreg_deinit;
5107 } else {
5108 spin_lock_irqsave(&host->lock, flags);
5109 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305110 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305111 spin_unlock_irqrestore(&host->lock, flags);
5112 }
5113 }
5114
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07005115 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305116 ret = sdhci_add_host(host);
5117 if (ret) {
5118 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05305119 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05305120 }
5121
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05305122 msm_host->pltfm_init_done = true;
5123
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005124 pm_runtime_set_active(&pdev->dev);
5125 pm_runtime_enable(&pdev->dev);
5126 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
5127 pm_runtime_use_autosuspend(&pdev->dev);
5128
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305129 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
5130 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
5131 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
5132 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
5133 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
5134 ret = device_create_file(&pdev->dev,
5135 &msm_host->msm_bus_vote.max_bus_bw);
5136 if (ret)
5137 goto remove_host;
5138
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305139 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
5140 msm_host->polling.show = show_polling;
5141 msm_host->polling.store = store_polling;
5142 sysfs_attr_init(&msm_host->polling.attr);
5143 msm_host->polling.attr.name = "polling";
5144 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
5145 ret = device_create_file(&pdev->dev, &msm_host->polling);
5146 if (ret)
5147 goto remove_max_bus_bw_file;
5148 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05305149
5150 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
5151 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
5152 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
5153 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
5154 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
5155 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5156 if (ret) {
5157 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
5158 mmc_hostname(host->mmc), __func__, ret);
5159 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5160 }
Ankit Jain1d7e5182017-09-20 11:55:38 +05305161 if (sdhci_msm_is_bootdevice(&pdev->dev))
5162 mmc_flush_detect_work(host->mmc);
5163
Asutosh Das0ef24812012-12-18 16:14:02 +05305164 /* Successful initialization */
5165 goto out;
5166
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305167remove_max_bus_bw_file:
5168 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05305169remove_host:
5170 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005171 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305172 sdhci_remove_host(host, dead);
5173vreg_deinit:
5174 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305175bus_unregister:
5176 if (msm_host->msm_bus_vote.client_handle)
5177 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5178 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07005179sleep_clk_disable:
5180 if (!IS_ERR(msm_host->sleep_clk))
5181 clk_disable_unprepare(msm_host->sleep_clk);
5182ff_clk_disable:
5183 if (!IS_ERR(msm_host->ff_clk))
5184 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305185clk_disable:
5186 if (!IS_ERR(msm_host->clk))
5187 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05305188bus_aggr_clk_disable:
5189 if (!IS_ERR(msm_host->bus_aggr_clk))
5190 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305191pclk_disable:
5192 if (!IS_ERR(msm_host->pclk))
5193 clk_disable_unprepare(msm_host->pclk);
5194bus_clk_disable:
5195 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
5196 clk_disable_unprepare(msm_host->bus_clk);
5197pltfm_free:
5198 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305199out_host_free:
5200 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305201out:
5202 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
5203 return ret;
5204}
5205
5206static int sdhci_msm_remove(struct platform_device *pdev)
5207{
5208 struct sdhci_host *host = platform_get_drvdata(pdev);
5209 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5210 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5211 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
5212 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
5213 0xffffffff);
5214
5215 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305216 if (!gpio_is_valid(msm_host->pdata->status_gpio))
5217 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305218 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005219 pm_runtime_disable(&pdev->dev);
Vijay Viswanath1971d222018-03-01 12:01:47 +05305220
5221 if (msm_host->pm_qos_wq)
5222 destroy_workqueue(msm_host->pm_qos_wq);
Asutosh Das0ef24812012-12-18 16:14:02 +05305223 sdhci_remove_host(host, dead);
5224 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05305225
Asutosh Das0ef24812012-12-18 16:14:02 +05305226 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305227
Pratibhasagar V9acf2642013-11-21 21:07:21 +05305228 sdhci_msm_setup_pins(pdata, true);
5229 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305230
5231 if (msm_host->msm_bus_vote.client_handle) {
5232 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5233 sdhci_msm_bus_unregister(msm_host);
5234 }
Asutosh Das0ef24812012-12-18 16:14:02 +05305235 return 0;
5236}
5237
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005238#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05305239static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
5240{
5241 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5242 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5243 unsigned long flags;
5244 int ret = 0;
5245
5246 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
5247 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
5248 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305249 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305250 return 1;
5251 }
5252
5253 spin_lock_irqsave(&host->lock, flags);
5254 if (enable) {
5255 /* configure DAT1 gpio if applicable */
5256 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305257 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305258 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5259 if (!ret)
5260 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
5261 goto out;
5262 } else {
5263 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5264 mmc_hostname(host->mmc), enable);
5265 }
5266 } else {
5267 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
5268 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5269 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305270 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305271 } else {
5272 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
5273 mmc_hostname(host->mmc), enable);
5274
5275 }
5276 }
5277out:
5278 if (ret)
5279 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
5280 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
5281 ret, msm_host->pdata->sdiowakeup_irq);
5282 spin_unlock_irqrestore(&host->lock, flags);
5283 return ret;
5284}
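/*
 * Editorial note: the helper above arms (or disarms) the DAT1 GPIO wakeup
 * interrupt only when the card is SDIO, a valid sdiowakeup_irq was provided
 * and SDIO IRQ wakeup is requested (mmc_card_wake_sdio_irq()); otherwise it
 * returns 1 and the suspend/resume paths below fall back to plain
 * sdhci_cfg_irq() toggling.
 */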
5285
5286
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005287static int sdhci_msm_runtime_suspend(struct device *dev)
5288{
5289 struct sdhci_host *host = dev_get_drvdata(dev);
5290 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5291 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005292 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305293 int ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005294
Ritesh Harjani42876f42015-11-17 17:46:51 +05305295 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5296 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05305297
Ritesh Harjani42876f42015-11-17 17:46:51 +05305298 sdhci_cfg_irq(host, false, true);
5299
5300defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005301 disable_irq(msm_host->pwr_irq);
5302
5303 /*
5304 * Remove the vote immediately only if clocks are off in which
5305 * case we might have queued work to remove vote but it may not
5306 * be completed before runtime suspend or system suspend.
5307 */
5308 if (!atomic_read(&msm_host->clks_on)) {
5309 if (msm_host->msm_bus_vote.client_handle)
5310 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5311 }
5312
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305313 if (host->is_crypto_en) {
5314 ret = sdhci_msm_ice_suspend(host);
5315 if (ret < 0)
5316 pr_err("%s: failed to suspend crypto engine %d\n",
5317 mmc_hostname(host->mmc), ret);
5318 }
Konstantin Dorfman98edaa12015-06-11 10:05:18 +03005319 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
5320 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005321 return 0;
5322}
5323
static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	ktime_t start = ktime_get();
	int ret;

	if (host->is_crypto_en) {
		ret = sdhci_msm_enable_controller_clock(host);
		if (ret) {
			pr_err("%s: Failed to enable required clocks\n",
				mmc_hostname(host->mmc));
			goto skip_ice_resume;
		}
		ret = sdhci_msm_ice_resume(host);
		if (ret)
			pr_err("%s: failed to resume crypto engine %d\n",
				mmc_hostname(host->mmc), ret);
	}
skip_ice_resume:

	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		goto defer_enable_host_irq;

	sdhci_cfg_irq(host, true, true);

defer_enable_host_irq:
	enable_irq(msm_host->pwr_irq);

	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}

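/*
 * System suspend: mask the card-detect IRQ, runtime-suspend the host if it
 * is not already runtime suspended, gate the controller clock and, for SDIO
 * cards, arm the wakeup interrupt (falling back to disabling the host IRQ
 * if the wakeup interrupt cannot be configured).
 */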
static int sdhci_msm_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
	    (msm_host->mmc->slot.cd_irq >= 0))
		disable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: already runtime suspended\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}
	ret = sdhci_msm_runtime_suspend(dev);
out:
	sdhci_msm_disable_controller_clock(host);
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
		if (sdio_cfg)
			sdhci_cfg_irq(host, false, true);
	}

	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}

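/*
 * System resume: re-enable the card-detect IRQ, runtime-resume the host
 * unless it was left runtime suspended, and for SDIO cards disarm the
 * wakeup interrupt (re-enabling the host IRQ if the wakeup interrupt had
 * not been configured).
 */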
static int sdhci_msm_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
	    (msm_host->mmc->slot.cd_irq >= 0))
		enable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: runtime suspended, defer system resume\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = sdhci_msm_runtime_resume(dev);
out:
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
		if (sdio_cfg)
			sdhci_cfg_irq(host, true, true);
	}

	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}

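/*
 * Noirq-phase check: by this point the clocks should be off and no SDIO
 * wakeup processing should be pending; returning -EAGAIN or -EBUSY aborts
 * this system suspend attempt so that it can be retried once the host is
 * idle.
 */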
static int sdhci_msm_suspend_noirq(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;

	/*
	 * The SDIO IRQ thread (ksdioirqd) may still be running, so abort
	 * this suspend attempt and let it be retried if the clocks are
	 * still ON.
	 */
	if (atomic_read(&msm_host->clks_on)) {
		pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
			mmc_hostname(host->mmc), __func__);
		ret = -EAGAIN;
	}

	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		if (msm_host->sdio_pending_processing)
			ret = -EBUSY;

	return ret;
}

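/*
 * SET_LATE_SYSTEM_SLEEP_PM_OPS hooks sdhci_msm_suspend/sdhci_msm_resume in
 * as the late-suspend/early-resume callbacks, SET_RUNTIME_PM_OPS supplies
 * the runtime PM callbacks, and the noirq-phase check above is wired up
 * explicitly.
 */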
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
	.suspend_noirq = sdhci_msm_suspend_noirq,
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
#define SDHCI_MSM_PMOPS NULL
#endif

static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{.compatible = "qcom,sdhci-msm-v5"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
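
/*
 * Illustrative devicetree node (a minimal sketch, not copied from a real
 * platform) that would match the table above; the unit address, register
 * ranges and interrupt specifiers are placeholders, and the property names
 * follow the typical qcom,sdhci-msm binding:
 *
 *	sdhc_1: sdhci@7864900 {
 *		compatible = "qcom,sdhci-msm";
 *		reg = <0x7864900 0x500>, <0x7864000 0x800>;
 *		reg-names = "hc_mem", "core_mem";
 *		interrupts = <0 125 0>, <0 221 0>;
 *		interrupt-names = "hc_irq", "pwr_irq";
 *	};
 */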

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
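
/*
 * Usage note (illustrative, not part of the driver): once the driver is
 * bound, runtime PM for the controller can be exercised through the
 * standard sysfs interface; the device path below is a placeholder:
 *
 *	echo auto > /sys/bus/platform/devices/7864900.sdhci/power/control
 */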