/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define SDHCI_VER_100		0x2B

#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)

#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_LOCK		(1 << 7)
#define CORE_DDR_DLL_LOCK	(1 << 11)

#define CORE_CLK_PWRSAVE		(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		(1 << 18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
#define HC_SW_RST_REQ		(1 << 21)
#define CORE_ONE_MID_EN		(1 << 25)

#define CORE_8_BIT_SUPPORT		(1 << 18)
#define CORE_3_3V_SUPPORT		(1 << 24)
#define CORE_3_0V_SUPPORT		(1 << 25)
#define CORE_1_8V_SUPPORT		(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_FIFO_ALT_EN		(1 << 10)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

#define CORE_DDR_CAL_EN			(1 << 0)
#define CORE_FLL_CYCLE_CNT		(1 << 18)
#define CORE_DLL_CLOCK_DISABLE		(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY		115
#define DDR_CONFIG_2_POR_VAL		0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
#define TCXO_FREQ		19200000

#define INVALID_TUNING_PHASE	-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES		16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100

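/*
 * Editor's note (summarizing what the code below expresses): vendor register
 * offsets differ between SDCC core generations. The "mci_removed" layout maps
 * the core registers into the host controller register space, while the legacy
 * layout keeps them in a separate MCI region; msm_host->offset points at the
 * table that matches the controller in use.
 */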
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};

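/*
 * Register access helpers: when the MCI block has been removed
 * (msm_host->mci_removed) the core registers are accessed through
 * host->ioaddr, otherwise through the separate core_mem mapping.
 */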
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

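/*
 * Tuning block patterns returned by the card in response to the tuning
 * commands (CMD19/CMD21): the 64-byte pattern is used by default and the
 * 128-byte pattern when tuning over an 8-bit bus (see
 * sdhci_msm_execute_tuning() below).
 */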
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever is specified in voltage_level (third argument) of
	 * the sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form a cycle then merge them as a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window then the
			 * total number of phases in both windows should not
			 * be more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 208000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		if ((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~(0xFF << 10)) | (mclk_freq << 10)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_CDC_T4_DLY_SEL),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		& ~CORE_CDC_SWITCH_BYPASS_OFF),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		| CORE_CDC_SWITCH_RC_EN),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		& ~CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_HW_AUTOCAL_ENA),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
		host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		| CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->pdata->rclk_wa) {
		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG_2)
		| CORE_DDR_CAL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
				mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
			mmc_hostname(mmc), __func__,
			drv_type);
}

Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001134int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
1135{
1136 unsigned long flags;
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301137 int tuning_seq_cnt = 3;
Krishna Konda96e6b112013-10-28 15:25:03 -07001138 u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001139 const u32 *tuning_block_pattern = tuning_block_64;
1140 int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
1141 int rc;
1142 struct mmc_host *mmc = host->mmc;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301143 struct mmc_ios ios = host->mmc->ios;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001144 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1145 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Krishna Konda96e6b112013-10-28 15:25:03 -07001146 u8 drv_type = 0;
1147 bool drv_type_changed = false;
1148 struct mmc_card *card = host->mmc->card;
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301149 int sts_retry;
Veerabhadrarao Badiganti174f3a82017-06-15 18:44:19 +05301150 u8 last_good_phase = 0;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301151
1152 /*
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001153 * Tuning is required for SDR104, HS200 and HS400 cards and
1154 * if clock frequency is greater than 100MHz in these modes.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301155 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001156 if (host->clock <= CORE_FREQ_100MHZ ||
1157 !((ios.timing == MMC_TIMING_MMC_HS400) ||
1158 (ios.timing == MMC_TIMING_MMC_HS200) ||
1159 (ios.timing == MMC_TIMING_UHS_SDR104)))
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301160 return 0;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001161
Sahitya Tummala0523cda2015-11-17 09:24:53 +05301162 /*
1163 * Don't allow re-tuning for CRC errors observed for any commands
1164 * that are sent during tuning sequence itself.
1165 */
1166 if (msm_host->tuning_in_progress)
1167 return 0;
1168 msm_host->tuning_in_progress = true;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001169 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001170
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001171 /* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001172 if (msm_host->tuning_done && !msm_host->calibration_done &&
1173 (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001174 rc = sdhci_msm_hs400_dll_calibration(host);
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001175 spin_lock_irqsave(&host->lock, flags);
1176 if (!rc)
1177 msm_host->calibration_done = true;
1178 spin_unlock_irqrestore(&host->lock, flags);
1179 goto out;
1180 }
1181
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001182 spin_lock_irqsave(&host->lock, flags);
1183
1184 if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
1185 (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
1186 tuning_block_pattern = tuning_block_128;
1187 size = sizeof(tuning_block_128);
1188 }
1189 spin_unlock_irqrestore(&host->lock, flags);
1190
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001191 data_buf = kmalloc(size, GFP_KERNEL);
1192 if (!data_buf) {
1193 rc = -ENOMEM;
1194 goto out;
1195 }
1196
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301197retry:
Krishna Konda96e6b112013-10-28 15:25:03 -07001198 tuned_phase_cnt = 0;
1199
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301200 /* first of all reset the tuning block */
1201 rc = msm_init_cm_dll(host);
1202 if (rc)
1203 goto kfree;
1204
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001205 phase = 0;
1206 do {
1207 struct mmc_command cmd = {0};
1208 struct mmc_data data = {0};
1209 struct mmc_request mrq = {
1210 .cmd = &cmd,
1211 .data = &data
1212 };
1213 struct scatterlist sg;
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301214 struct mmc_command sts_cmd = {0};
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001215
1216 /* set the phase in delay line hw block */
1217 rc = msm_config_cm_dll_phase(host, phase);
1218 if (rc)
1219 goto kfree;
1220
1221 cmd.opcode = opcode;
1222 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1223
1224 data.blksz = size;
1225 data.blocks = 1;
1226 data.flags = MMC_DATA_READ;
1227 data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
1228
1229 data.sg = &sg;
1230 data.sg_len = 1;
1231 sg_init_one(&sg, data_buf, size);
1232 memset(data_buf, 0, size);
1233 mmc_wait_for_req(mmc, &mrq);
1234
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301235 if (card && (cmd.error || data.error)) {
Veerabhadrarao Badiganti174f3a82017-06-15 18:44:19 +05301236 /*
1237 * Set the dll to last known good phase while sending
1238 * status command to ensure that status command won't
1239 * fail due to bad phase.
1240 */
1241 if (tuned_phase_cnt)
1242 last_good_phase =
1243 tuned_phases[tuned_phase_cnt-1];
1244 else if (msm_host->saved_tuning_phase !=
1245 INVALID_TUNING_PHASE)
1246 last_good_phase = msm_host->saved_tuning_phase;
1247
1248 rc = msm_config_cm_dll_phase(host, last_good_phase);
1249 if (rc)
1250 goto kfree;
1251
Sahitya Tummalafaff7f82015-02-25 14:24:52 +05301252 sts_cmd.opcode = MMC_SEND_STATUS;
1253 sts_cmd.arg = card->rca << 16;
1254 sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1255 sts_retry = 5;
1256 while (sts_retry) {
1257 mmc_wait_for_cmd(mmc, &sts_cmd, 0);
1258
1259 if (sts_cmd.error ||
1260 (R1_CURRENT_STATE(sts_cmd.resp[0])
1261 != R1_STATE_TRAN)) {
1262 sts_retry--;
1263 /*
1264 * wait for at least 146 MCLK cycles for
1265 * the card to move to TRANS state. As
1266 * the MCLK would be min 200MHz for
1267 * tuning, we need max 0.73us delay. To
1268 * be on safer side 1ms delay is given.
1269 */
1270 usleep_range(1000, 1200);
1271 pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
1272 mmc_hostname(mmc), phase,
1273 sts_cmd.error, sts_cmd.resp[0]);
1274 continue;
1275 }
1276 break;
1277 };
1278 }
1279
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001280 if (!cmd.error && !data.error &&
1281 !memcmp(data_buf, tuning_block_pattern, size)) {
1282 /* tuning is successful at this tuning point */
1283 tuned_phases[tuned_phase_cnt++] = phase;
Krishna Konda96e6b112013-10-28 15:25:03 -07001284 pr_debug("%s: %s: found *** good *** phase = %d\n",
1285 mmc_hostname(mmc), __func__, phase);
1286 } else {
Veerabhadrarao Badiganticd78bbb2017-10-17 08:41:01 +05301287 /* Ignore crc errors occurred during tuning */
1288 if (cmd.error)
1289 mmc->err_stats[MMC_ERR_CMD_CRC]--;
1290 else if (data.error)
1291 mmc->err_stats[MMC_ERR_DAT_CRC]--;
Krishna Konda96e6b112013-10-28 15:25:03 -07001292 pr_debug("%s: %s: found ## bad ## phase = %d\n",
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001293 mmc_hostname(mmc), __func__, phase);
1294 }
1295 } while (++phase < 16);
1296
Sahitya Tummaladfdb4af2014-04-01 14:29:13 +05301297 if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
1298 card && mmc_card_mmc(card)) {
Krishna Konda96e6b112013-10-28 15:25:03 -07001299 /*
1300 * If all phases pass then its a problem. So change the card's
1301 * drive type to a different value, if supported and repeat
1302 * tuning until at least one phase fails. Then set the original
1303 * drive type back.
1304 *
1305 * If all the phases still pass after trying all possible
1306 * drive types, then one of those 16 phases will be picked.
1307 * This is no different from what was going on before the
1308 * modification to change drive type and retune.
1309 */
1310 pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
1311 tuned_phase_cnt);
1312
1313 /* set drive type to other value . default setting is 0x0 */
1314 while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
Talel Shenhar6f0f3412015-11-08 14:21:31 +02001315 pr_debug("%s: trying different drive strength (%d)\n",
1316 mmc_hostname(mmc), drv_type);
Krishna Konda96e6b112013-10-28 15:25:03 -07001317 if (card->ext_csd.raw_driver_strength &
1318 (1 << drv_type)) {
1319 sdhci_msm_set_mmc_drv_type(host, opcode,
1320 drv_type);
1321 if (!drv_type_changed)
1322 drv_type_changed = true;
1323 goto retry;
1324 }
1325 }
1326 }
1327
1328 /* reset drive type to default (50 ohm) if changed */
1329 if (drv_type_changed)
1330 sdhci_msm_set_mmc_drv_type(host, opcode, 0);
1331
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001332 if (tuned_phase_cnt) {
1333 rc = msm_find_most_appropriate_phase(host, tuned_phases,
1334 tuned_phase_cnt);
1335 if (rc < 0)
1336 goto kfree;
1337 else
1338 phase = (u8)rc;
1339
1340 /*
1341 * Finally set the selected phase in delay
1342 * line hw block.
1343 */
1344 rc = msm_config_cm_dll_phase(host, phase);
1345 if (rc)
1346 goto kfree;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001347 msm_host->saved_tuning_phase = phase;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001348 pr_debug("%s: %s: finally setting the tuning phase to %d\n",
1349 mmc_hostname(mmc), __func__, phase);
1350 } else {
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301351 if (--tuning_seq_cnt)
1352 goto retry;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001353 /* tuning failed */
1354 pr_err("%s: %s: no tuning point found\n",
1355 mmc_hostname(mmc), __func__);
Sahitya Tummala9fe16532013-06-13 10:36:57 +05301356 rc = -EIO;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001357 }
1358
1359kfree:
1360 kfree(data_buf);
1361out:
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001362 spin_lock_irqsave(&host->lock, flags);
1363 if (!rc)
1364 msm_host->tuning_done = true;
1365 spin_unlock_irqrestore(&host->lock, flags);
Sahitya Tummala0523cda2015-11-17 09:24:53 +05301366 msm_host->tuning_in_progress = false;
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07001367 pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07001368 return rc;
1369}
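
/*
 * Illustrative note for the tuning sequence above (values are assumptions,
 * not taken from any specific card): if ext_csd.raw_driver_strength were
 * 0x0f, the card would advertise drive types 0-3. When every phase passes,
 * the retry loop above re-runs tuning with each remaining advertised drive
 * type in turn and restores the default 50 ohm type (0) once done.
 */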
1370
Asutosh Das0ef24812012-12-18 16:14:02 +05301371static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1372{
1373 struct sdhci_msm_gpio_data *curr;
1374 int i, ret = 0;
1375
1376 curr = pdata->pin_data->gpio_data;
1377 for (i = 0; i < curr->size; i++) {
1378 if (!gpio_is_valid(curr->gpio[i].no)) {
1379 ret = -EINVAL;
1380 pr_err("%s: Invalid gpio = %d\n", __func__,
1381 curr->gpio[i].no);
1382 goto free_gpios;
1383 }
1384 if (enable) {
1385 ret = gpio_request(curr->gpio[i].no,
1386 curr->gpio[i].name);
1387 if (ret) {
1388 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1389 __func__, curr->gpio[i].no,
1390 curr->gpio[i].name, ret);
1391 goto free_gpios;
1392 }
1393 curr->gpio[i].is_enabled = true;
1394 } else {
1395 gpio_free(curr->gpio[i].no);
1396 curr->gpio[i].is_enabled = false;
1397 }
1398 }
1399 return ret;
1400
1401free_gpios:
1402 for (i--; i >= 0; i--) {
1403 gpio_free(curr->gpio[i].no);
1404 curr->gpio[i].is_enabled = false;
1405 }
1406 return ret;
1407}
1408
Can Guob903ad82017-10-17 13:22:53 +08001409static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
1410 unsigned int clock)
1411{
1412 int ret = 0;
1413
1414 if (clock > 150000000) {
1415 if (pdata->pctrl_data->pins_drv_type_200MHz)
1416 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1417 pdata->pctrl_data->pins_drv_type_200MHz);
1418 } else if (clock > 75000000) {
1419 if (pdata->pctrl_data->pins_drv_type_100MHz)
1420 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1421 pdata->pctrl_data->pins_drv_type_100MHz);
1422 } else if (clock > 400000) {
1423 if (pdata->pctrl_data->pins_drv_type_50MHz)
1424 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1425 pdata->pctrl_data->pins_drv_type_50MHz);
1426 } else {
1427 if (pdata->pctrl_data->pins_drv_type_400KHz)
1428 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1429 pdata->pctrl_data->pins_drv_type_400KHz);
1430 }
1431
1432 return ret;
1433}
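
/*
 * Clock to pinctrl drive-strength state mapping used by
 * sdhci_msm_config_pinctrl_drv_type() above (each state is optional and is
 * simply skipped when the devicetree does not provide it):
 *
 *   clock > 150 MHz             -> "ds_200MHz"
 *   75 MHz  < clock <= 150 MHz  -> "ds_100MHz"
 *   400 kHz < clock <= 75 MHz   -> "ds_50MHz"
 *   clock <= 400 kHz            -> "ds_400KHz"
 */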
1434
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301435static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1436 bool enable)
1437{
1438 int ret = 0;
1439
1440 if (enable)
1441 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1442 pdata->pctrl_data->pins_active);
1443 else
1444 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1445 pdata->pctrl_data->pins_sleep);
1446
1447 if (ret < 0)
1448 pr_err("%s state for pinctrl failed with %d\n",
1449 enable ? "Enabling" : "Disabling", ret);
1450
1451 return ret;
1452}
1453
Asutosh Das0ef24812012-12-18 16:14:02 +05301454static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1455{
1456 int ret = 0;
1457
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301458 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301459 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301460 } else if (pdata->pctrl_data) {
1461 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1462 goto out;
1463 } else if (!pdata->pin_data) {
1464 return 0;
1465 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301466
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301467 if (pdata->pin_data->is_gpio)
1468 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301469out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301470 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301471 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301472
1473 return ret;
1474}
1475
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301476static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1477 u32 **out, int *len, u32 size)
1478{
1479 int ret = 0;
1480 struct device_node *np = dev->of_node;
1481 size_t sz;
1482 u32 *arr = NULL;
1483
1484 if (!of_get_property(np, prop_name, len)) {
1485 ret = -EINVAL;
1486 goto out;
1487 }
1488 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001489 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301490 dev_err(dev, "%s invalid size\n", prop_name);
1491 ret = -EINVAL;
1492 goto out;
1493 }
1494
1495 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1496 if (!arr) {
1497 dev_err(dev, "%s failed allocating memory\n", prop_name);
1498 ret = -ENOMEM;
1499 goto out;
1500 }
1501
1502 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1503 if (ret < 0) {
1504 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1505 goto out;
1506 }
1507 *out = arr;
1508out:
1509 if (ret)
1510 *len = 0;
1511 return ret;
1512}
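
/*
 * Example usage of sdhci_msm_dt_get_array() above; the property contents
 * are illustrative only and not taken from any particular target:
 *
 *   qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
 *
 * parses into an array of five u32 rates with *len set to 5.
 */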
1513
Asutosh Das0ef24812012-12-18 16:14:02 +05301514#define MAX_PROP_SIZE 32
1515static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1516 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1517{
1518 int len, ret = 0;
1519 const __be32 *prop;
1520 char prop_name[MAX_PROP_SIZE];
1521 struct sdhci_msm_reg_data *vreg;
1522 struct device_node *np = dev->of_node;
1523
1524 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1525 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301526 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301527 return ret;
1528 }
1529
1530 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1531 if (!vreg) {
1532 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1533 ret = -ENOMEM;
1534 return ret;
1535 }
1536
1537 vreg->name = vreg_name;
1538
1539 snprintf(prop_name, MAX_PROP_SIZE,
1540 "qcom,%s-always-on", vreg_name);
1541 if (of_get_property(np, prop_name, NULL))
1542 vreg->is_always_on = true;
1543
1544 snprintf(prop_name, MAX_PROP_SIZE,
1545 "qcom,%s-lpm-sup", vreg_name);
1546 if (of_get_property(np, prop_name, NULL))
1547 vreg->lpm_sup = true;
1548
1549 snprintf(prop_name, MAX_PROP_SIZE,
1550 "qcom,%s-voltage-level", vreg_name);
1551 prop = of_get_property(np, prop_name, &len);
1552 if (!prop || (len != (2 * sizeof(__be32)))) {
1553 dev_warn(dev, "%s %s property\n",
1554 prop ? "invalid format" : "no", prop_name);
1555 } else {
1556 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1557 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1558 }
1559
1560 snprintf(prop_name, MAX_PROP_SIZE,
1561 "qcom,%s-current-level", vreg_name);
1562 prop = of_get_property(np, prop_name, &len);
1563 if (!prop || (len != (2 * sizeof(__be32)))) {
1564 dev_warn(dev, "%s %s property\n",
1565 prop ? "invalid format" : "no", prop_name);
1566 } else {
1567 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1568 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1569 }
1570
1571 *vreg_data = vreg;
1572 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1573 vreg->name, vreg->is_always_on ? "always_on," : "",
1574 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1575 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1576
1577 return ret;
1578}
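
/*
 * Illustrative devicetree fragment for sdhci_msm_dt_parse_vreg_info()
 * above, shown for the "vdd" supply (phandle and values are hypothetical):
 *
 *   vdd-supply = <&pm_ldo>;
 *   qcom,vdd-always-on;
 *   qcom,vdd-lpm-sup;
 *   qcom,vdd-voltage-level = <2950000 2950000>;
 *   qcom,vdd-current-level = <200 800000>;
 *
 * The voltage-level cells fill low_vol_level/high_vol_level and the
 * current-level cells fill lpm_uA/hpm_uA.
 */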
1579
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301580static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1581 struct sdhci_msm_pltfm_data *pdata)
1582{
1583 struct sdhci_pinctrl_data *pctrl_data;
1584 struct pinctrl *pctrl;
1585 int ret = 0;
1586
1587 /* Try to obtain pinctrl handle */
1588 pctrl = devm_pinctrl_get(dev);
1589 if (IS_ERR(pctrl)) {
1590 ret = PTR_ERR(pctrl);
1591 goto out;
1592 }
1593 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1594 if (!pctrl_data) {
1595 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1596 ret = -ENOMEM;
1597 goto out;
1598 }
1599 pctrl_data->pctrl = pctrl;
1600 /* Look-up and keep the states handy to be used later */
1601 pctrl_data->pins_active = pinctrl_lookup_state(
1602 pctrl_data->pctrl, "active");
1603 if (IS_ERR(pctrl_data->pins_active)) {
1604 ret = PTR_ERR(pctrl_data->pins_active);
1605 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1606 goto out;
1607 }
1608 pctrl_data->pins_sleep = pinctrl_lookup_state(
1609 pctrl_data->pctrl, "sleep");
1610 if (IS_ERR(pctrl_data->pins_sleep)) {
1611 ret = PTR_ERR(pctrl_data->pins_sleep);
1612 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1613 goto out;
1614 }
Can Guob903ad82017-10-17 13:22:53 +08001615
1616 pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
1617 pctrl_data->pctrl, "ds_400KHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301618 if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001619		dev_dbg(dev, "Could not get 400K pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_400KHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301620 pctrl_data->pins_drv_type_400KHz = NULL;
1621 }
Can Guob903ad82017-10-17 13:22:53 +08001622
1623 pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
1624 pctrl_data->pctrl, "ds_50MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301625 if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001626		dev_dbg(dev, "Could not get 50M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_50MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301627 pctrl_data->pins_drv_type_50MHz = NULL;
1628 }
Can Guob903ad82017-10-17 13:22:53 +08001629
1630 pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
1631 pctrl_data->pctrl, "ds_100MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301632 if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001633		dev_dbg(dev, "Could not get 100M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_100MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301634 pctrl_data->pins_drv_type_100MHz = NULL;
1635 }
Can Guob903ad82017-10-17 13:22:53 +08001636
1637 pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
1638 pctrl_data->pctrl, "ds_200MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301639 if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001640		dev_dbg(dev, "Could not get 200M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_200MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301641 pctrl_data->pins_drv_type_200MHz = NULL;
1642 }
Can Guob903ad82017-10-17 13:22:53 +08001643
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301644 pdata->pctrl_data = pctrl_data;
1645out:
1646 return ret;
1647}
1648
Asutosh Das0ef24812012-12-18 16:14:02 +05301649#define GPIO_NAME_MAX_LEN 32
1650static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1651 struct sdhci_msm_pltfm_data *pdata)
1652{
1653 int ret = 0, cnt, i;
1654 struct sdhci_msm_pin_data *pin_data;
1655 struct device_node *np = dev->of_node;
1656
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301657 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1658 if (!ret) {
1659 goto out;
1660 } else if (ret == -EPROBE_DEFER) {
1661 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1662 goto out;
1663 } else {
1664 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1665 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301666 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301667 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301668 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1669 if (!pin_data) {
1670 dev_err(dev, "No memory for pin_data\n");
1671 ret = -ENOMEM;
1672 goto out;
1673 }
1674
1675 cnt = of_gpio_count(np);
1676 if (cnt > 0) {
1677 pin_data->gpio_data = devm_kzalloc(dev,
1678 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1679 if (!pin_data->gpio_data) {
1680 dev_err(dev, "No memory for gpio_data\n");
1681 ret = -ENOMEM;
1682 goto out;
1683 }
1684 pin_data->gpio_data->size = cnt;
1685 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1686 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1687
1688 if (!pin_data->gpio_data->gpio) {
1689 dev_err(dev, "No memory for gpio\n");
1690 ret = -ENOMEM;
1691 goto out;
1692 }
1693
1694 for (i = 0; i < cnt; i++) {
1695 const char *name = NULL;
1696 char result[GPIO_NAME_MAX_LEN];
1697 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1698 of_property_read_string_index(np,
1699 "qcom,gpio-names", i, &name);
1700
1701 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1702 dev_name(dev), name ? name : "?");
1703			/*
			 * Duplicate the stack-local "result" buffer so the
			 * name pointer stays valid after this loop ends.
			 */
			pin_data->gpio_data->gpio[i].name =
					devm_kstrdup(dev, result, GFP_KERNEL);
1704 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1705 pin_data->gpio_data->gpio[i].name,
1706 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301707 }
1708 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301709 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301710out:
1711 if (ret)
1712 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1713 return ret;
1714}
1715
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001716#ifdef CONFIG_SMP
1717static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1718{
1719 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1720}
1721#else
1722static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1723#endif
1724
Gilad Bronerc788a672015-09-08 15:39:11 +03001725static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1726 struct sdhci_msm_pltfm_data *pdata)
1727{
1728 struct device_node *np = dev->of_node;
1729 const char *str;
1730 u32 cpu;
1731 int ret = 0;
1732 int i;
1733
1734 pdata->pm_qos_data.irq_valid = false;
1735 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1736 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1737 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001738 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001739 }
1740
1741 /* must specify cpu for "affine_cores" type */
1742 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1743 pdata->pm_qos_data.irq_cpu = -1;
1744 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1745 if (ret) {
1746 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1747 ret);
1748 goto out;
1749 }
1750 if (cpu < 0 || cpu >= num_possible_cpus()) {
1751 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1752 __func__, cpu, num_possible_cpus());
1753 ret = -EINVAL;
1754 goto out;
1755 }
1756 pdata->pm_qos_data.irq_cpu = cpu;
1757 }
1758
1759 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1760 SDHCI_POWER_POLICY_NUM) {
1761 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1762 __func__, SDHCI_POWER_POLICY_NUM);
1763 ret = -EINVAL;
1764 goto out;
1765 }
1766
1767 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1768 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1769 &pdata->pm_qos_data.irq_latency.latency[i]);
1770
1771 pdata->pm_qos_data.irq_valid = true;
1772out:
1773 return ret;
1774}
1775
1776static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1777 struct sdhci_msm_pltfm_data *pdata)
1778{
1779 struct device_node *np = dev->of_node;
1780 u32 mask;
1781 int nr_groups;
1782 int ret;
1783 int i;
1784
1785 /* Read cpu group mapping */
1786 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1787 if (nr_groups <= 0) {
1788 ret = -EINVAL;
1789 goto out;
1790 }
1791 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1792 pdata->pm_qos_data.cpu_group_map.mask =
1793 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1794 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1795 ret = -ENOMEM;
1796 goto out;
1797 }
1798
1799 for (i = 0; i < nr_groups; i++) {
1800 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1801 i, &mask);
1802
1803 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1804 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1805 cpu_possible_mask)) {
1806 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1807 __func__, mask, i);
1808 ret = -EINVAL;
1809 goto free_res;
1810 }
1811 }
1812 return 0;
1813
1814free_res:
1815 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1816out:
1817 return ret;
1818}
1819
1820static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1821 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1822{
1823 struct device_node *np = dev->of_node;
1824 struct sdhci_msm_pm_qos_latency *values;
1825 int ret;
1826 int i;
1827 int group;
1828 int cfg;
1829
1830 ret = of_property_count_u32_elems(np, name);
1831 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1832 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1833 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1834 ret);
1835 return -EINVAL;
1836 } else if (ret < 0) {
1837 return ret;
1838 }
1839
1840 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1841 GFP_KERNEL);
1842 if (!values)
1843 return -ENOMEM;
1844
1845 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1846 group = i / SDHCI_POWER_POLICY_NUM;
1847 cfg = i % SDHCI_POWER_POLICY_NUM;
1848 of_property_read_u32_index(np, name, i,
1849 &(values[group].latency[cfg]));
1850 }
1851
1852 *latency = values;
1853 return 0;
1854}
1855
1856static void sdhci_msm_pm_qos_parse(struct device *dev,
1857 struct sdhci_msm_pltfm_data *pdata)
1858{
1859 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1860 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1861 __func__);
1862
1863 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1864 pdata->pm_qos_data.cmdq_valid =
1865 !sdhci_msm_pm_qos_parse_latency(dev,
1866 "qcom,pm-qos-cmdq-latency-us",
1867 pdata->pm_qos_data.cpu_group_map.nr_groups,
1868 &pdata->pm_qos_data.cmdq_latency);
1869 pdata->pm_qos_data.legacy_valid =
1870 !sdhci_msm_pm_qos_parse_latency(dev,
1871 "qcom,pm-qos-legacy-latency-us",
1872 pdata->pm_qos_data.cpu_group_map.nr_groups,
1873 &pdata->pm_qos_data.latency);
1874 if (!pdata->pm_qos_data.cmdq_valid &&
1875 !pdata->pm_qos_data.legacy_valid) {
1876 /* clean-up previously allocated arrays */
1877 kfree(pdata->pm_qos_data.latency);
1878 kfree(pdata->pm_qos_data.cmdq_latency);
1879 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1880 __func__);
1881 }
1882 } else {
1883 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1884 __func__);
1885 }
1886}
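
/*
 * Illustrative PM QoS devicetree properties consumed by the parsers above
 * (all values are assumptions given purely as an example):
 *
 *   qcom,pm-qos-irq-type = "affine_cores";
 *   qcom,pm-qos-irq-cpu = <0>;
 *   qcom,pm-qos-irq-latency = <...>;        (SDHCI_POWER_POLICY_NUM cells)
 *   qcom,pm-qos-cpu-groups = <0x03 0x0c>;   (cpus 0-1 and cpus 2-3)
 *   qcom,pm-qos-legacy-latency-us = <...>;  (SDHCI_POWER_POLICY_NUM cells per group)
 *   qcom,pm-qos-cmdq-latency-us = <...>;    (likewise, used when CMDQ is enabled)
 */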
1887
Asutosh Das0ef24812012-12-18 16:14:02 +05301888/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001889static
1890struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1891 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301892{
1893 struct sdhci_msm_pltfm_data *pdata = NULL;
1894 struct device_node *np = dev->of_node;
1895 u32 bus_width = 0;
1896 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301897 int clk_table_len;
1898 u32 *clk_table = NULL;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301899 int ice_clk_table_len;
1900 u32 *ice_clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301901 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301902 const char *lower_bus_speed = NULL;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05301903 int bus_clk_table_len;
1904 u32 *bus_clk_table = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301905
1906 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1907 if (!pdata) {
1908 dev_err(dev, "failed to allocate memory for platform data\n");
1909 goto out;
1910 }
1911
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301912 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
Bao D. Nguyen0f5ac952017-06-14 12:42:41 -07001913 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301914 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301915
Asutosh Das0ef24812012-12-18 16:14:02 +05301916 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1917 if (bus_width == 8)
1918 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1919 else if (bus_width == 4)
1920 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1921 else {
1922 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1923 pdata->mmc_bus_width = 0;
1924 }
1925
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001926 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301927 &msm_host->mmc->clk_scaling.pltfm_freq_table,
1928 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001929 pr_debug("%s: no clock scaling frequencies were supplied\n",
1930 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301931 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
1932 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
1933 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001934
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301935 /*
1936	 * A few hosts can support DDR52 mode at the same lower
1937	 * system voltage corner as high-speed mode. In such cases,
1938	 * it is always better to put the card in DDR mode, which
1939	 * improves performance without any power impact.
1940 */
1941 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
1942 &lower_bus_speed)) {
1943 if (!strcmp(lower_bus_speed, "DDR52"))
1944 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
1945 MMC_SCALING_LOWER_DDR52_MODE;
1946 }
1947
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301948 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1949 &clk_table, &clk_table_len, 0)) {
1950 dev_err(dev, "failed parsing supported clock rates\n");
1951 goto out;
1952 }
1953 if (!clk_table || !clk_table_len) {
1954 dev_err(dev, "Invalid clock table\n");
1955 goto out;
1956 }
1957 pdata->sup_clk_table = clk_table;
1958 pdata->sup_clk_cnt = clk_table_len;
1959
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05301960 if (!sdhci_msm_dt_get_array(dev, "qcom,bus-aggr-clk-rates",
1961 &bus_clk_table, &bus_clk_table_len, 0)) {
1962 if (bus_clk_table && bus_clk_table_len) {
1963 pdata->bus_clk_table = bus_clk_table;
1964 pdata->bus_clk_cnt = bus_clk_table_len;
1965 }
1966 }
1967
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301968 if (msm_host->ice.pdev) {
1969 if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
1970 &ice_clk_table, &ice_clk_table_len, 0)) {
1971 dev_err(dev, "failed parsing supported ice clock rates\n");
1972 goto out;
1973 }
1974 if (!ice_clk_table || !ice_clk_table_len) {
1975 dev_err(dev, "Invalid clock table\n");
1976 goto out;
1977 }
Sahitya Tummala073ca552015-08-06 13:59:37 +05301978 if (ice_clk_table_len != 2) {
1979 dev_err(dev, "Need max and min frequencies in the table\n");
1980 goto out;
1981 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301982 pdata->sup_ice_clk_table = ice_clk_table;
1983 pdata->sup_ice_clk_cnt = ice_clk_table_len;
Sahitya Tummala073ca552015-08-06 13:59:37 +05301984 pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
1985 pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
1986 dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
1987 pdata->ice_clk_max, pdata->ice_clk_min);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301988 }
1989
Asutosh Das0ef24812012-12-18 16:14:02 +05301990 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1991 sdhci_msm_slot_reg_data),
1992 GFP_KERNEL);
1993 if (!pdata->vreg_data) {
1994 dev_err(dev, "failed to allocate memory for vreg data\n");
1995 goto out;
1996 }
1997
1998 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1999 "vdd")) {
2000 dev_err(dev, "failed parsing vdd data\n");
2001 goto out;
2002 }
2003 if (sdhci_msm_dt_parse_vreg_info(dev,
2004 &pdata->vreg_data->vdd_io_data,
2005 "vdd-io")) {
2006 dev_err(dev, "failed parsing vdd-io data\n");
2007 goto out;
2008 }
2009
2010 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
2011 dev_err(dev, "failed parsing gpio data\n");
2012 goto out;
2013 }
2014
Asutosh Das0ef24812012-12-18 16:14:02 +05302015 len = of_property_count_strings(np, "qcom,bus-speed-mode");
2016
2017 for (i = 0; i < len; i++) {
2018 const char *name = NULL;
2019
2020 of_property_read_string_index(np,
2021 "qcom,bus-speed-mode", i, &name);
2022 if (!name)
2023 continue;
2024
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002025 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
2026 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
2027 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
2028 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
2029 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05302030 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2031 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
2032 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2033 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
2034 pdata->caps |= MMC_CAP_1_8V_DDR
2035 | MMC_CAP_UHS_DDR50;
2036 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
2037 pdata->caps |= MMC_CAP_1_2V_DDR
2038 | MMC_CAP_UHS_DDR50;
2039 }
2040
2041 if (of_get_property(np, "qcom,nonremovable", NULL))
2042 pdata->nonremovable = true;
2043
Guoping Yuf7c91332014-08-20 16:56:18 +08002044 if (of_get_property(np, "qcom,nonhotplug", NULL))
2045 pdata->nonhotplug = true;
2046
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08002047 pdata->largeaddressbus =
2048 of_property_read_bool(np, "qcom,large-address-bus");
2049
Dov Levenglickc9033ab2015-03-10 16:00:56 +02002050 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
2051 msm_host->mmc->wakeup_on_idle = true;
2052
Gilad Bronerc788a672015-09-08 15:39:11 +03002053 sdhci_msm_pm_qos_parse(dev, pdata);
2054
Pavan Anamula5a256df2015-10-16 14:38:28 +05302055 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302056 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05302057
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002058 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002059 msm_host->regs_restore.is_supported =
2060 of_property_read_bool(np, "qcom,restore-after-cx-collapse");
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002061
Vijay Viswanatha5492612017-10-17 15:38:55 +05302062 if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
2063 pdata->rclk_wa = true;
2064
Asutosh Das0ef24812012-12-18 16:14:02 +05302065 return pdata;
2066out:
2067 return NULL;
2068}
2069
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302070/* Returns required bandwidth in Bytes per Sec */
2071static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
2072 struct mmc_ios *ios)
2073{
Sahitya Tummala2886c922013-04-03 18:03:31 +05302074 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2075 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2076
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302077 unsigned int bw;
2078
Sahitya Tummala2886c922013-04-03 18:03:31 +05302079 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302080 /*
2081	 * For DDR mode, the SDCC controller clock runs at double
2082	 * the rate of the actual clock that goes to the card.
2083 */
2084 if (ios->bus_width == MMC_BUS_WIDTH_4)
2085 bw /= 2;
2086 else if (ios->bus_width == MMC_BUS_WIDTH_1)
2087 bw /= 8;
2088
2089 return bw;
2090}
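
/*
 * Worked example for sdhci_get_bw_required() above (numbers illustrative):
 * with the controller clock at 200000000 Hz, an 8-bit bus requests
 * 200000000 Bytes/sec, a 4-bit bus half of that (100000000) and a 1-bit
 * bus one eighth (25000000).
 */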
2091
2092static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
2093 unsigned int bw)
2094{
2095 unsigned int *table = host->pdata->voting_data->bw_vecs;
2096 unsigned int size = host->pdata->voting_data->bw_vecs_size;
2097 int i;
2098
2099 if (host->msm_bus_vote.is_max_bw_needed && bw)
2100 return host->msm_bus_vote.max_bw_vote;
2101
2102 for (i = 0; i < size; i++) {
2103 if (bw <= table[i])
2104 break;
2105 }
2106
2107 if (i && (i == size))
2108 i--;
2109
2110 return i;
2111}
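
/*
 * Worked example for sdhci_msm_bus_get_vote_for_bw() above, assuming a
 * hypothetical qcom,bus-bw-vectors-bps table of <0 100000000 200000000>:
 * a request of 150000000 Bytes/sec returns index 2 (the first entry that
 * is >= the requested bandwidth), while any request above the last entry
 * is clamped to the last index.
 */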
2112
2113/*
2114 * This function must be called with host lock acquired.
2115 * Caller of this function should also ensure that msm bus client
2116 * handle is not null.
2117 */
2118static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
2119 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302120 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302121{
2122 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2123 int rc = 0;
2124
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302125 BUG_ON(!flags);
2126
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302127 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302128 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302129 rc = msm_bus_scale_client_update_request(
2130 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302131 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302132 if (rc) {
2133 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2134 mmc_hostname(host->mmc),
2135 msm_host->msm_bus_vote.client_handle, vote, rc);
2136 goto out;
2137 }
2138 msm_host->msm_bus_vote.curr_vote = vote;
2139 }
2140out:
2141 return rc;
2142}
2143
2144/*
2145 * Internal work. Work to set 0 bandwidth for msm bus.
2146 */
2147static void sdhci_msm_bus_work(struct work_struct *work)
2148{
2149 struct sdhci_msm_host *msm_host;
2150 struct sdhci_host *host;
2151 unsigned long flags;
2152
2153 msm_host = container_of(work, struct sdhci_msm_host,
2154 msm_bus_vote.vote_work.work);
2155 host = platform_get_drvdata(msm_host->pdev);
2156
2157 if (!msm_host->msm_bus_vote.client_handle)
2158 return;
2159
2160 spin_lock_irqsave(&host->lock, flags);
2161 /* don't vote for 0 bandwidth if any request is in progress */
2162 if (!host->mrq) {
2163 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302164 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302165 } else
2166 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
2167 mmc_hostname(host->mmc), __func__);
2168 spin_unlock_irqrestore(&host->lock, flags);
2169}
2170
2171/*
2172 * This function cancels any scheduled delayed work and sets the bus
2173 * vote based on bw (bandwidth) argument.
2174 */
2175static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2176 unsigned int bw)
2177{
2178 int vote;
2179 unsigned long flags;
2180 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2181 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2182
2183 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2184 spin_lock_irqsave(&host->lock, flags);
2185 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302186 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302187 spin_unlock_irqrestore(&host->lock, flags);
2188}
2189
2190#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2191
2192/* This function queues work which will set the bandwidth requirement to 0 */
2193static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2194{
2195 unsigned long flags;
2196 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2197 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2198
2199 spin_lock_irqsave(&host->lock, flags);
2200 if (msm_host->msm_bus_vote.min_bw_vote !=
2201 msm_host->msm_bus_vote.curr_vote)
2202 queue_delayed_work(system_wq,
2203 &msm_host->msm_bus_vote.vote_work,
2204 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2205 spin_unlock_irqrestore(&host->lock, flags);
2206}
2207
2208static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2209 struct platform_device *pdev)
2210{
2211 int rc = 0;
2212 struct msm_bus_scale_pdata *bus_pdata;
2213
2214 struct sdhci_msm_bus_voting_data *data;
2215 struct device *dev = &pdev->dev;
2216
2217 data = devm_kzalloc(dev,
2218 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2219 if (!data) {
2220 dev_err(&pdev->dev,
2221 "%s: failed to allocate memory\n", __func__);
2222 rc = -ENOMEM;
2223 goto out;
2224 }
2225 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2226 if (data->bus_pdata) {
2227 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2228 &data->bw_vecs, &data->bw_vecs_size, 0);
2229 if (rc) {
2230 dev_err(&pdev->dev,
2231 "%s: Failed to get bus-bw-vectors-bps\n",
2232 __func__);
2233 goto out;
2234 }
2235 host->pdata->voting_data = data;
2236 }
2237 if (host->pdata->voting_data &&
2238 host->pdata->voting_data->bus_pdata &&
2239 host->pdata->voting_data->bw_vecs &&
2240 host->pdata->voting_data->bw_vecs_size) {
2241
2242 bus_pdata = host->pdata->voting_data->bus_pdata;
2243 host->msm_bus_vote.client_handle =
2244 msm_bus_scale_register_client(bus_pdata);
2245 if (!host->msm_bus_vote.client_handle) {
2246			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2247 rc = -EFAULT;
2248 goto out;
2249 }
2250 /* cache the vote index for minimum and maximum bandwidth */
2251 host->msm_bus_vote.min_bw_vote =
2252 sdhci_msm_bus_get_vote_for_bw(host, 0);
2253 host->msm_bus_vote.max_bw_vote =
2254 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2255 } else {
2256 devm_kfree(dev, data);
2257 }
2258
2259out:
2260 return rc;
2261}
2262
2263static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2264{
2265 if (host->msm_bus_vote.client_handle)
2266 msm_bus_scale_unregister_client(
2267 host->msm_bus_vote.client_handle);
2268}
2269
2270static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2271{
2272 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2273 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2274 struct mmc_ios *ios = &host->mmc->ios;
2275 unsigned int bw;
2276
2277 if (!msm_host->msm_bus_vote.client_handle)
2278 return;
2279
2280 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302281 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302282 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302283 } else {
2284 /*
2285 * If clock gating is enabled, then remove the vote
2286 * immediately because clocks will be disabled only
2287 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2288 * additional delay is required to remove the bus vote.
2289 */
2290#ifdef CONFIG_MMC_CLKGATE
2291 if (host->mmc->clkgate_delay)
2292 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2293 else
2294#endif
2295 sdhci_msm_bus_queue_work(host);
2296 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302297}
2298
Asutosh Das0ef24812012-12-18 16:14:02 +05302299/* Regulator utility functions */
2300static int sdhci_msm_vreg_init_reg(struct device *dev,
2301 struct sdhci_msm_reg_data *vreg)
2302{
2303 int ret = 0;
2304
2305	/* Check whether the regulator is already initialized */
2306 if (vreg->reg)
2307 goto out;
2308
2309 /* Get the regulator handle */
2310 vreg->reg = devm_regulator_get(dev, vreg->name);
2311 if (IS_ERR(vreg->reg)) {
2312 ret = PTR_ERR(vreg->reg);
2313 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2314 __func__, vreg->name, ret);
2315 goto out;
2316 }
2317
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302318 if (regulator_count_voltages(vreg->reg) > 0) {
2319 vreg->set_voltage_sup = true;
2320 /* sanity check */
2321 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2322 pr_err("%s: %s invalid constraints specified\n",
2323 __func__, vreg->name);
2324 ret = -EINVAL;
2325 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302326 }
2327
2328out:
2329 return ret;
2330}
2331
2332static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2333{
2334 if (vreg->reg)
2335 devm_regulator_put(vreg->reg);
2336}
2337
2338static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2339 *vreg, int uA_load)
2340{
2341 int ret = 0;
2342
2343 /*
2344	 * Regulators that do not support regulator_set_voltage also
2345	 * do not support regulator_set_load (formerly set_optimum_mode).
2346 */
2347 if (vreg->set_voltage_sup) {
2348 ret = regulator_set_load(vreg->reg, uA_load);
2349 if (ret < 0)
2350 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2351 __func__, vreg->name, uA_load, ret);
2352 else
2353 /*
2354			 * regulator_set_load() can return a non-zero
2355			 * value even in the success case.
2356 */
2357 ret = 0;
2358 }
2359 return ret;
2360}
2361
2362static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2363 int min_uV, int max_uV)
2364{
2365 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302366 if (vreg->set_voltage_sup) {
2367 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2368 if (ret) {
2369 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302370 __func__, vreg->name, min_uV, max_uV, ret);
2371 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302372 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302373
2374 return ret;
2375}
2376
2377static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2378{
2379 int ret = 0;
2380
2381 /* Put regulator in HPM (high power mode) */
2382 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2383 if (ret < 0)
2384 return ret;
2385
2386 if (!vreg->is_enabled) {
2387 /* Set voltage level */
2388 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2389 vreg->high_vol_level);
2390 if (ret)
2391 return ret;
2392 }
2393 ret = regulator_enable(vreg->reg);
2394 if (ret) {
2395 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2396 __func__, vreg->name, ret);
2397 return ret;
2398 }
2399 vreg->is_enabled = true;
2400 return ret;
2401}
2402
2403static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2404{
2405 int ret = 0;
2406
2407 /* Never disable regulator marked as always_on */
2408 if (vreg->is_enabled && !vreg->is_always_on) {
2409 ret = regulator_disable(vreg->reg);
2410 if (ret) {
2411 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2412 __func__, vreg->name, ret);
2413 goto out;
2414 }
2415 vreg->is_enabled = false;
2416
2417 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2418 if (ret < 0)
2419 goto out;
2420
2421 /* Set min. voltage level to 0 */
2422 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2423 if (ret)
2424 goto out;
2425 } else if (vreg->is_enabled && vreg->is_always_on) {
2426 if (vreg->lpm_sup) {
2427 /* Put always_on regulator in LPM (low power mode) */
2428 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2429 vreg->lpm_uA);
2430 if (ret < 0)
2431 goto out;
2432 }
2433 }
2434out:
2435 return ret;
2436}
2437
2438static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2439 bool enable, bool is_init)
2440{
2441 int ret = 0, i;
2442 struct sdhci_msm_slot_reg_data *curr_slot;
2443 struct sdhci_msm_reg_data *vreg_table[2];
2444
2445 curr_slot = pdata->vreg_data;
2446 if (!curr_slot) {
2447		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2448 __func__);
2449 goto out;
2450 }
2451
2452 vreg_table[0] = curr_slot->vdd_data;
2453 vreg_table[1] = curr_slot->vdd_io_data;
2454
2455 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2456 if (vreg_table[i]) {
2457 if (enable)
2458 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2459 else
2460 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2461 if (ret)
2462 goto out;
2463 }
2464 }
2465out:
2466 return ret;
2467}
2468
Asutosh Das0ef24812012-12-18 16:14:02 +05302469/* This init function should be called only once for each SDHC slot */
2470static int sdhci_msm_vreg_init(struct device *dev,
2471 struct sdhci_msm_pltfm_data *pdata,
2472 bool is_init)
2473{
2474 int ret = 0;
2475 struct sdhci_msm_slot_reg_data *curr_slot;
2476 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2477
2478 curr_slot = pdata->vreg_data;
2479 if (!curr_slot)
2480 goto out;
2481
2482 curr_vdd_reg = curr_slot->vdd_data;
2483 curr_vdd_io_reg = curr_slot->vdd_io_data;
2484
2485 if (!is_init)
2486 /* Deregister all regulators from regulator framework */
2487 goto vdd_io_reg_deinit;
2488
2489 /*
2490 * Get the regulator handle from voltage regulator framework
2491 * and then try to set the voltage level for the regulator
2492 */
2493 if (curr_vdd_reg) {
2494 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2495 if (ret)
2496 goto out;
2497 }
2498 if (curr_vdd_io_reg) {
2499 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2500 if (ret)
2501 goto vdd_reg_deinit;
2502 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302503
Asutosh Das0ef24812012-12-18 16:14:02 +05302504 if (ret)
2505 dev_err(dev, "vreg reset failed (%d)\n", ret);
2506 goto out;
2507
2508vdd_io_reg_deinit:
2509 if (curr_vdd_io_reg)
2510 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2511vdd_reg_deinit:
2512 if (curr_vdd_reg)
2513 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2514out:
2515 return ret;
2516}
2517
2518
2519static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2520 enum vdd_io_level level,
2521 unsigned int voltage_level)
2522{
2523 int ret = 0;
2524 int set_level;
2525 struct sdhci_msm_reg_data *vdd_io_reg;
2526
2527 if (!pdata->vreg_data)
2528 return ret;
2529
2530 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2531 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2532 switch (level) {
2533 case VDD_IO_LOW:
2534 set_level = vdd_io_reg->low_vol_level;
2535 break;
2536 case VDD_IO_HIGH:
2537 set_level = vdd_io_reg->high_vol_level;
2538 break;
2539 case VDD_IO_SET_LEVEL:
2540 set_level = voltage_level;
2541 break;
2542 default:
2543			pr_err("%s: invalid argument level = %d\n",
2544					__func__, level);
2545 ret = -EINVAL;
2546 return ret;
2547 }
2548 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2549 set_level);
2550 }
2551 return ret;
2552}
2553
Ritesh Harjani42876f42015-11-17 17:46:51 +05302554/*
2555 * Acquire spin-lock host->lock before calling this function
2556 */
2557static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2558 bool enable)
2559{
2560 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2561 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2562
2563 if (enable && !msm_host->is_sdiowakeup_enabled)
2564 enable_irq(msm_host->pdata->sdiowakeup_irq);
2565 else if (!enable && msm_host->is_sdiowakeup_enabled)
2566 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2567 else
2568 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2569 __func__, enable, msm_host->is_sdiowakeup_enabled);
2570 msm_host->is_sdiowakeup_enabled = enable;
2571}
2572
2573static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2574{
2575 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302576 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2577 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2578
Ritesh Harjani42876f42015-11-17 17:46:51 +05302579 unsigned long flags;
2580
2581 pr_debug("%s: irq (%d) received\n", __func__, irq);
2582
2583 spin_lock_irqsave(&host->lock, flags);
2584 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2585 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302586 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302587
2588 return IRQ_HANDLED;
2589}
2590
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302591void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2592{
2593 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2594 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302595 const struct sdhci_msm_offset *msm_host_offset =
2596 msm_host->offset;
Siba Prasad0196fe42017-06-27 15:13:27 +05302597 unsigned int irq_flags = 0;
2598 struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302599
Siba Prasad0196fe42017-06-27 15:13:27 +05302600 if (pwr_irq_desc)
2601 irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
2602 state_use_accessors);
2603
2604 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302605 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302606 sdhci_msm_readl_relaxed(host,
2607 msm_host_offset->CORE_PWRCTL_STATUS),
2608 sdhci_msm_readl_relaxed(host,
2609 msm_host_offset->CORE_PWRCTL_MASK),
2610 sdhci_msm_readl_relaxed(host,
Siba Prasad0196fe42017-06-27 15:13:27 +05302611 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
2612
2613 MMC_TRACE(host->mmc,
2614 "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
2615 __func__,
2616 sdhci_msm_readb_relaxed(host,
2617 msm_host_offset->CORE_PWRCTL_STATUS),
2618 sdhci_msm_readb_relaxed(host,
2619 msm_host_offset->CORE_PWRCTL_MASK),
2620 sdhci_msm_readb_relaxed(host,
2621 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302622}
2623
Asutosh Das0ef24812012-12-18 16:14:02 +05302624static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2625{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002626 struct sdhci_host *host = (struct sdhci_host *)data;
2627 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2628 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302629 const struct sdhci_msm_offset *msm_host_offset =
2630 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302631 u8 irq_status = 0;
2632 u8 irq_ack = 0;
2633 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302634 int pwr_state = 0, io_level = 0;
2635 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302636 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302637
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302638 irq_status = sdhci_msm_readb_relaxed(host,
2639 msm_host_offset->CORE_PWRCTL_STATUS);
2640
Asutosh Das0ef24812012-12-18 16:14:02 +05302641 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2642 mmc_hostname(msm_host->mmc), irq, irq_status);
2643
2644 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302645 sdhci_msm_writeb_relaxed(irq_status, host,
2646 msm_host_offset->CORE_PWRCTL_CLEAR);
2647
Asutosh Das0ef24812012-12-18 16:14:02 +05302648 /*
2649 * SDHC has core_mem and hc_mem device memory and these memory
2650 * addresses do not fall within 1KB region. Hence, any update to
2651 * core_mem address space would require an mb() to ensure this gets
2652 * completed before its next update to registers within hc_mem.
2653 */
2654 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302655 /*
2656	 * There is a rare HW scenario where the first clear pulse could be
2657	 * lost when the actual reset and the clear/read of the status register
2658	 * happen at the same time. Hence, retry up to 10 times to make
2659	 * sure the status register is cleared. Otherwise, this will result in
2660 * a spurious power IRQ resulting in system instability.
2661 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302662 while (irq_status & sdhci_msm_readb_relaxed(host,
2663 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302664 if (retry == 0) {
2665 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2666 mmc_hostname(host->mmc), irq_status);
2667 sdhci_msm_dump_pwr_ctrl_regs(host);
2668 BUG_ON(1);
2669 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302670 sdhci_msm_writeb_relaxed(irq_status, host,
2671 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302672 retry--;
2673 udelay(10);
2674 }
2675 if (likely(retry < 10))
2676 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2677 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302678
2679 /* Handle BUS ON/OFF*/
2680 if (irq_status & CORE_PWRCTL_BUS_ON) {
2681 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302682 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302683 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302684 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2685 VDD_IO_HIGH, 0);
2686 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302687 if (ret)
2688 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2689 else
2690 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302691
2692 pwr_state = REQ_BUS_ON;
2693 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302694 }
2695 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302696 if (msm_host->pltfm_init_done)
2697 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2698 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302699 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302700 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302701 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2702 VDD_IO_LOW, 0);
2703 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302704 if (ret)
2705 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2706 else
2707 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302708
2709 pwr_state = REQ_BUS_OFF;
2710 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302711 }
2712 /* Handle IO LOW/HIGH */
2713 if (irq_status & CORE_PWRCTL_IO_LOW) {
2714 /* Switch voltage Low */
2715 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2716 if (ret)
2717 irq_ack |= CORE_PWRCTL_IO_FAIL;
2718 else
2719 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302720
2721 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302722 }
2723 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2724 /* Switch voltage High */
2725 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2726 if (ret)
2727 irq_ack |= CORE_PWRCTL_IO_FAIL;
2728 else
2729 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302730
2731 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302732 }
2733
2734 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302735 sdhci_msm_writeb_relaxed(irq_ack, host,
2736 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302737 /*
2738 * SDHC has core_mem and hc_mem device memory and these memory
2739 * addresses do not fall within 1KB region. Hence, any update to
2740 * core_mem address space would require an mb() to ensure this gets
2741 * completed before its next update to registers within hc_mem.
2742 */
2743 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302744 if ((io_level & REQ_IO_HIGH) &&
2745 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2746 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302747 writel_relaxed((readl_relaxed(host->ioaddr +
2748 msm_host_offset->CORE_VENDOR_SPEC) &
2749 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2750 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002751 else if ((io_level & REQ_IO_LOW) ||
2752 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302753 writel_relaxed((readl_relaxed(host->ioaddr +
2754 msm_host_offset->CORE_VENDOR_SPEC) |
2755 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2756 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002757 mb();
2758
Asutosh Das0ef24812012-12-18 16:14:02 +05302759 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2760 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302761 spin_lock_irqsave(&host->lock, flags);
2762 if (pwr_state)
2763 msm_host->curr_pwr_state = pwr_state;
2764 if (io_level)
2765 msm_host->curr_io_level = io_level;
2766 complete(&msm_host->pwr_irq_completion);
2767 spin_unlock_irqrestore(&host->lock, flags);
2768
Asutosh Das0ef24812012-12-18 16:14:02 +05302769 return IRQ_HANDLED;
2770}
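
/*
 * Summary of the power IRQ handshake implemented above: the controller
 * latches a request (bus on/off, IO voltage high/low) in CORE_PWRCTL_STATUS
 * and raises the IRQ. The handler clears the status (retrying, since a
 * clear pulse can be lost), performs the regulator, pin and IO-pad work,
 * and acks success or failure through CORE_PWRCTL_CTL; it then signals
 * pwr_irq_completion so sdhci_msm_check_power_status() can stop waiting.
 */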
2771
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302772static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302773show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2774{
2775 struct sdhci_host *host = dev_get_drvdata(dev);
2776 int poll;
2777 unsigned long flags;
2778
2779 spin_lock_irqsave(&host->lock, flags);
2780 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2781 spin_unlock_irqrestore(&host->lock, flags);
2782
2783 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2784}
2785
2786static ssize_t
2787store_polling(struct device *dev, struct device_attribute *attr,
2788 const char *buf, size_t count)
2789{
2790 struct sdhci_host *host = dev_get_drvdata(dev);
2791 int value;
2792 unsigned long flags;
2793
2794 if (!kstrtou32(buf, 0, &value)) {
2795 spin_lock_irqsave(&host->lock, flags);
2796 if (value) {
2797 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2798 mmc_detect_change(host->mmc, 0);
2799 } else {
2800 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2801 }
2802 spin_unlock_irqrestore(&host->lock, flags);
2803 }
2804 return count;
2805}
2806
2807static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302808show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2809 char *buf)
2810{
2811 struct sdhci_host *host = dev_get_drvdata(dev);
2812 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2813 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2814
2815 return snprintf(buf, PAGE_SIZE, "%u\n",
2816 msm_host->msm_bus_vote.is_max_bw_needed);
2817}
2818
2819static ssize_t
2820store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2821 const char *buf, size_t count)
2822{
2823 struct sdhci_host *host = dev_get_drvdata(dev);
2824 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2825 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2826 uint32_t value;
2827 unsigned long flags;
2828
2829 if (!kstrtou32(buf, 0, &value)) {
2830 spin_lock_irqsave(&host->lock, flags);
2831 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2832 spin_unlock_irqrestore(&host->lock, flags);
2833 }
2834 return count;
2835}
2836
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302837static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302838{
2839 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2840 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302841 const struct sdhci_msm_offset *msm_host_offset =
2842 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302843 unsigned long flags;
2844 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302845 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302846
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302847 spin_lock_irqsave(&host->lock, flags);
2848 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2849 mmc_hostname(host->mmc), __func__, req_type,
2850 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302851 if (!msm_host->mci_removed)
2852 io_sig_sts = sdhci_msm_readl_relaxed(host,
2853 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302854
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302855 /*
2856 * The IRQ for request type IO High/Low will be generated when -
2857 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2858 * 2. If 1 is true and when there is a state change in 1.8V enable
2859 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2860 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2861 * layer tries to set it to 3.3V before card detection happens, the
2862 * IRQ doesn't get triggered as there is no state change in this bit.
2863 * The driver already handles this case by changing the IO voltage
2864 * level to high as part of controller power up sequence. Hence, check
2865 * for host->pwr to handle a case where IO voltage high request is
2866 * issued even before controller power up.
2867 */
2868 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2869 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2870 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2871 pr_debug("%s: do not wait for power IRQ that never comes\n",
2872 mmc_hostname(host->mmc));
2873 spin_unlock_irqrestore(&host->lock, flags);
2874 return;
2875 }
2876 }
2877
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302878 if ((req_type & msm_host->curr_pwr_state) ||
2879 (req_type & msm_host->curr_io_level))
2880 done = true;
2881 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302882
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302883 /*
2884	 * This is needed here to handle a case where IRQ gets
2885 * triggered even before this function is called so that
2886 * x->done counter of completion gets reset. Otherwise,
2887 * next call to wait_for_completion returns immediately
2888 * without actually waiting for the IRQ to be handled.
2889 */
2890 if (done)
2891 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302892 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
Siba Prasad0196fe42017-06-27 15:13:27 +05302893 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
Ritesh Harjani82124772014-11-04 15:34:00 +05302894 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2895 mmc_hostname(host->mmc), req_type);
Siba Prasad0196fe42017-06-27 15:13:27 +05302896 MMC_TRACE(host->mmc,
2897 "%s: request(%d) timed out waiting for pwr_irq\n",
2898 __func__, req_type);
2899 sdhci_msm_dump_pwr_ctrl_regs(host);
2900 }
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302901 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2902 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302903}
2904
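/*
 * Toggle the Clock Data Recovery (CDR) circuit in DLL_CONFIG: when CDR is
 * enabled the external CDR input (CDR_EXT_EN) is deselected, and the
 * external input is selected instead when CDR is disabled.
 */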
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002905static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2906{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302907 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2908 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2909 const struct sdhci_msm_offset *msm_host_offset =
2910 msm_host->offset;
2911 u32 config = readl_relaxed(host->ioaddr +
2912 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302913
2914 if (enable) {
2915 config |= CORE_CDR_EN;
2916 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302917 writel_relaxed(config, host->ioaddr +
2918 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302919 } else {
2920 config &= ~CORE_CDR_EN;
2921 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302922 writel_relaxed(config, host->ioaddr +
2923 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302924 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002925}
2926
Asutosh Das648f9d12013-01-10 21:11:04 +05302927static unsigned int sdhci_msm_max_segs(void)
2928{
2929 return SDHCI_MSM_MAX_SEGMENTS;
2930}
2931
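/*
 * sup_clk_table is assumed to be sorted in ascending order: entry 0 holds
 * the minimum supported rate and the last entry holds the maximum.
 */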
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302932static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302933{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302934 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2935 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302936
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302937 return msm_host->pdata->sup_clk_table[0];
2938}
2939
2940static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2941{
2942 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2943 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2944 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2945
2946 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2947}
2948
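/*
 * Return the highest supported rate that does not exceed req_clk; requests
 * below the minimum supported rate are rounded up to the minimum. For
 * example (illustrative table values only), with {400KHz, 25MHz, 50MHz,
 * 100MHz, 200MHz} a request for 52MHz selects 50MHz.
 */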
2949static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2950 u32 req_clk)
2951{
2952 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2953 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2954 unsigned int sel_clk = -1;
2955 unsigned char cnt;
2956
2957 if (req_clk < sdhci_msm_get_min_clock(host)) {
2958 sel_clk = sdhci_msm_get_min_clock(host);
2959 return sel_clk;
2960 }
2961
2962 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2963 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2964 break;
2965 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2966 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2967 break;
2968 } else {
2969 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2970 }
2971 }
2972 return sel_clk;
2973}
2974
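/*
 * Map an apps clock rate to the matching bus aggregation clock rate.
 * bus_clk_table is indexed in parallel with sup_clk_table, so both tables
 * are expected to have the same number of entries.
 */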
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05302975static long sdhci_msm_get_bus_aggr_clk_rate(struct sdhci_host *host,
2976 u32 apps_clk)
2977{
2978 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2979 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2980 long sel_clk = -1;
2981 unsigned char cnt;
2982
2983 if (msm_host->pdata->bus_clk_cnt != msm_host->pdata->sup_clk_cnt) {
2984 pr_err("%s: %s: mismatch between bus_clk_cnt(%u) and apps_clk_cnt(%u)\n",
2985 mmc_hostname(host->mmc), __func__,
2986 (unsigned int)msm_host->pdata->bus_clk_cnt,
2987 (unsigned int)msm_host->pdata->sup_clk_cnt);
2988 return msm_host->pdata->bus_clk_table[0];
2989 }
2990 if (apps_clk == sdhci_msm_get_min_clock(host)) {
2991 sel_clk = msm_host->pdata->bus_clk_table[0];
2992 return sel_clk;
2993 }
2994
2995 for (cnt = 0; cnt < msm_host->pdata->bus_clk_cnt; cnt++) {
2996 if (msm_host->pdata->sup_clk_table[cnt] > apps_clk)
2997 break;
2998 sel_clk = msm_host->pdata->bus_clk_table[cnt];
2999 }
3000 return sel_clk;
3001}
3002
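/*
 * Save the vendor-specific and standard SDHCI registers before the
 * controller clocks are gated, so sdhci_msm_registers_restore() can put
 * them back when the clocks are re-enabled.
 */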
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003003static void sdhci_msm_registers_save(struct sdhci_host *host)
3004{
3005 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3006 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3007 const struct sdhci_msm_offset *msm_host_offset =
3008 msm_host->offset;
3009
3010 if (!msm_host->regs_restore.is_supported)
3011 return;
3012
3013 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
3014 msm_host_offset->CORE_VENDOR_SPEC);
3015 msm_host->regs_restore.vendor_pwrctl_mask =
3016 readl_relaxed(host->ioaddr +
3017 msm_host_offset->CORE_PWRCTL_MASK);
3018 msm_host->regs_restore.vendor_func2 =
3019 readl_relaxed(host->ioaddr +
3020 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3021 msm_host->regs_restore.vendor_func3 =
3022 readl_relaxed(host->ioaddr +
3023 msm_host_offset->CORE_VENDOR_SPEC3);
3024 msm_host->regs_restore.hc_2c_2e =
3025 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
3026 msm_host->regs_restore.hc_3c_3e =
3027 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
3028 msm_host->regs_restore.vendor_pwrctl_ctl =
3029 readl_relaxed(host->ioaddr +
3030 msm_host_offset->CORE_PWRCTL_CTL);
3031 msm_host->regs_restore.hc_38_3a =
3032 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
3033 msm_host->regs_restore.hc_34_36 =
3034 sdhci_readl(host, SDHCI_INT_ENABLE);
3035 msm_host->regs_restore.hc_28_2a =
3036 sdhci_readl(host, SDHCI_HOST_CONTROL);
3037 msm_host->regs_restore.vendor_caps_0 =
3038 readl_relaxed(host->ioaddr +
3039 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3040 msm_host->regs_restore.hc_caps_1 =
3041 sdhci_readl(host, SDHCI_CAPABILITIES_1);
3042 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
3043 msm_host_offset->CORE_TESTBUS_CONFIG);
3044 msm_host->regs_restore.is_valid = true;
3045
3046 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
3047 mmc_hostname(host->mmc), __func__,
3048 readl_relaxed(host->ioaddr +
3049 msm_host_offset->CORE_PWRCTL_MASK));
3050}
3051
3052static void sdhci_msm_registers_restore(struct sdhci_host *host)
3053{
3054 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3055 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3056 const struct sdhci_msm_offset *msm_host_offset =
3057 msm_host->offset;
3058
3059 if (!msm_host->regs_restore.is_supported ||
3060 !msm_host->regs_restore.is_valid)
3061 return;
3062
3063 writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
3064 msm_host_offset->CORE_VENDOR_SPEC);
3065 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
3066 host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
3067 writel_relaxed(msm_host->regs_restore.vendor_func2,
3068 host->ioaddr +
3069 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3070 writel_relaxed(msm_host->regs_restore.vendor_func3,
3071 host->ioaddr +
3072 msm_host_offset->CORE_VENDOR_SPEC3);
3073 sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
3074 SDHCI_CLOCK_CONTROL);
3075 sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
3076 SDHCI_AUTO_CMD_ERR);
3077 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
3078 host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
3079 sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
3080 SDHCI_SIGNAL_ENABLE);
3081 sdhci_writel(host, msm_host->regs_restore.hc_34_36,
3082 SDHCI_INT_ENABLE);
3083 sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
3084 SDHCI_HOST_CONTROL);
3085 writel_relaxed(msm_host->regs_restore.vendor_caps_0,
3086 host->ioaddr +
3087 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3088 sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
3089 SDHCI_CAPABILITIES_1);
3090 writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
3091 msm_host_offset->CORE_TESTBUS_CONFIG);
3092 msm_host->regs_restore.is_valid = false;
3093
3094 pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
3095 mmc_hostname(host->mmc), __func__,
3096 readl_relaxed(host->ioaddr +
3097 msm_host_offset->CORE_PWRCTL_MASK));
3098}
3099
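/*
 * Enable the controller interface clocks in order: bus vote, pclk, bus
 * aggregation clock, host core clock and (if present) the ICE clock. On
 * failure the clocks enabled so far are unwound and the bus vote dropped.
 */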
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303100static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
3101{
3102 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3103 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3104 int rc = 0;
3105
3106 if (atomic_read(&msm_host->controller_clock))
3107 return 0;
3108
3109 sdhci_msm_bus_voting(host, 1);
3110
3111 if (!IS_ERR(msm_host->pclk)) {
3112 rc = clk_prepare_enable(msm_host->pclk);
3113 if (rc) {
3114 pr_err("%s: %s: failed to enable the pclk with error %d\n",
3115 mmc_hostname(host->mmc), __func__, rc);
3116 goto remove_vote;
3117 }
3118 }
3119
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303120 if (!IS_ERR(msm_host->bus_aggr_clk)) {
3121 rc = clk_prepare_enable(msm_host->bus_aggr_clk);
3122 if (rc) {
3123 pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
3124 mmc_hostname(host->mmc), __func__, rc);
3125 goto disable_pclk;
3126 }
3127 }
3128
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303129 rc = clk_prepare_enable(msm_host->clk);
3130 if (rc) {
3131 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
3132 mmc_hostname(host->mmc), __func__, rc);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303133 goto disable_bus_aggr_clk;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303134 }
3135
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303136 if (!IS_ERR(msm_host->ice_clk)) {
3137 rc = clk_prepare_enable(msm_host->ice_clk);
3138 if (rc) {
3139 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
3140 mmc_hostname(host->mmc), __func__, rc);
3141 goto disable_host_clk;
3142 }
3143 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303144 atomic_set(&msm_host->controller_clock, 1);
3145 pr_debug("%s: %s: enabled controller clock\n",
3146 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003147 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303148 goto out;
3149
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303150disable_host_clk:
3151 if (!IS_ERR(msm_host->clk))
3152 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303153disable_bus_aggr_clk:
3154 if (!IS_ERR(msm_host->bus_aggr_clk))
3155 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303156disable_pclk:
3157 if (!IS_ERR(msm_host->pclk))
3158 clk_disable_unprepare(msm_host->pclk);
3159remove_vote:
3160 if (msm_host->msm_bus_vote.client_handle)
3161 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3162out:
3163 return rc;
3164}
3165
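/*
 * Counterpart of sdhci_msm_enable_controller_clock(): save the register
 * context, disable the clocks and drop the bus vote.
 */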
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303166static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
3167{
3168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3169 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303170
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303171 if (atomic_read(&msm_host->controller_clock)) {
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003172 sdhci_msm_registers_save(host);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303173 if (!IS_ERR(msm_host->clk))
3174 clk_disable_unprepare(msm_host->clk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303175 if (!IS_ERR(msm_host->ice_clk))
3176 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303177 if (!IS_ERR(msm_host->bus_aggr_clk))
3178 clk_disable_unprepare(msm_host->bus_aggr_clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303179 if (!IS_ERR(msm_host->pclk))
3180 clk_disable_unprepare(msm_host->pclk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303181 sdhci_msm_bus_voting(host, 0);
3182 atomic_set(&msm_host->controller_clock, 0);
3183 pr_debug("%s: %s: disabled controller clock\n",
3184 mmc_hostname(host->mmc), __func__);
3185 }
3186}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303187
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303188static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
3189{
3190 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3191 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3192 int rc = 0;
3193
3194 if (enable && !atomic_read(&msm_host->clks_on)) {
3195 pr_debug("%s: request to enable clocks\n",
3196 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303197
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303198 /*
3199		 * The bus width or the clock rate might have changed
3200		 * after the controller clocks were enabled; update the
3201		 * bus vote in that case.
3202 */
3203 if (atomic_read(&msm_host->controller_clock))
3204 sdhci_msm_bus_voting(host, 1);
3205
3206 rc = sdhci_msm_enable_controller_clock(host);
3207 if (rc)
3208 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303209
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303210 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3211 rc = clk_prepare_enable(msm_host->bus_clk);
3212 if (rc) {
3213 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
3214 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303215 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303216 }
3217 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003218 if (!IS_ERR(msm_host->ff_clk)) {
3219 rc = clk_prepare_enable(msm_host->ff_clk);
3220 if (rc) {
3221 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
3222 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303223 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003224 }
3225 }
3226 if (!IS_ERR(msm_host->sleep_clk)) {
3227 rc = clk_prepare_enable(msm_host->sleep_clk);
3228 if (rc) {
3229 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
3230 mmc_hostname(host->mmc), __func__, rc);
3231 goto disable_ff_clk;
3232 }
3233 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303234 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303235
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303236 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303237 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
3238 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05303239 /*
3240 * During 1.8V signal switching the clock source must
3241 * still be ON as it requires accessing SDHC
3242		 * registers (SDHCI Host Control2 register bit 3 must
3243 * be written and polled after stopping the SDCLK).
3244 */
3245 if (host->mmc->card_clock_off)
3246 return 0;
3247 pr_debug("%s: request to disable clocks\n",
3248 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003249 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
3250 clk_disable_unprepare(msm_host->sleep_clk);
3251 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3252 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303253 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3254 clk_disable_unprepare(msm_host->bus_clk);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003255 sdhci_msm_disable_controller_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303256 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303257 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303258 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003259disable_ff_clk:
3260 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3261 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303262disable_bus_clk:
3263 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3264 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303265disable_controller_clk:
3266 if (!IS_ERR_OR_NULL(msm_host->clk))
3267 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303268 if (!IS_ERR(msm_host->ice_clk))
3269 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303270 if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
3271 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303272 if (!IS_ERR_OR_NULL(msm_host->pclk))
3273 clk_disable_unprepare(msm_host->pclk);
3274 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303275remove_vote:
3276 if (msm_host->msm_bus_vote.client_handle)
3277 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303278out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303279 return rc;
3280}
3281
3282static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3283{
3284 int rc;
3285 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3286 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303287 const struct sdhci_msm_offset *msm_host_offset =
3288 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003289 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303290 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003291 u32 sup_clock, ddr_clock, dll_lock;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303292 long bus_clk_rate;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303293 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303294
3295 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303296 /*
3297 * disable pwrsave to ensure clock is not auto-gated until
3298 * the rate is >400KHz (initialization complete).
3299 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303300 writel_relaxed(readl_relaxed(host->ioaddr +
3301 msm_host_offset->CORE_VENDOR_SPEC) &
3302 ~CORE_CLK_PWRSAVE, host->ioaddr +
3303 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303304 sdhci_msm_prepare_clocks(host, false);
3305 host->clock = clock;
3306 goto out;
3307 }
3308
3309 rc = sdhci_msm_prepare_clocks(host, true);
3310 if (rc)
3311 goto out;
3312
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303313 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3314 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303315 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003316 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303317 writel_relaxed(readl_relaxed(host->ioaddr +
3318 msm_host_offset->CORE_VENDOR_SPEC)
3319 | CORE_CLK_PWRSAVE, host->ioaddr +
3320 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303321 /*
3322	 * Disable pwrsave for a newly added card if it doesn't allow clock
3323 * gating.
3324 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003325 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303326 writel_relaxed(readl_relaxed(host->ioaddr +
3327 msm_host_offset->CORE_VENDOR_SPEC)
3328 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3329 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303330
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303331 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003332 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003333 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003334 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303335 /*
3336 * The SDHC requires internal clock frequency to be double the
3337 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003338		 * uses the faster clock (100/400MHz) for some of its parts and
3339		 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303340 */
3341 ddr_clock = clock * 2;
3342 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3343 ddr_clock);
3344 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003345
3346 /*
3347 * In general all timing modes are controlled via UHS mode select in
3348	 * Host Control2 register. The eMMC-specific HS200/HS400 modes don't
3349	 * have their own values defined here, hence we use these values.
3350 *
3351 * HS200 - SDR104 (Since they both are equivalent in functionality)
3352 * HS400 - This involves multiple configurations
3353 * Initially SDR104 - when tuning is required as HS200
3354 * Then when switching to DDR @ 400MHz (HS400) we use
3355 * the vendor specific HC_SELECT_IN to control the mode.
3356 *
3357 * In addition to controlling the modes we also need to select the
3358 * correct input clock for DLL depending on the mode.
3359 *
3360 * HS400 - divided clock (free running MCLK/2)
3361 * All other modes - default (free running MCLK)
3362 */
3363 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3364 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303365 writel_relaxed(((readl_relaxed(host->ioaddr +
3366 msm_host_offset->CORE_VENDOR_SPEC)
3367 & ~CORE_HC_MCLK_SEL_MASK)
3368 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3369 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003370 /*
3371 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3372 * register
3373 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303374 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003375 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303376 msm_host->enhanced_strobe)) &&
3377 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003378 /*
3379 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3380 * field in VENDOR_SPEC_FUNC
3381 */
3382 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303383 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003384 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303385 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3386 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003387 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003388 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3389 /*
3390 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3391 * CORE_DLL_STATUS to be set. This should get set
3392 * with in 15 us at 200 MHz.
3393			 * within 15 us at 200 MHz.
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303394 rc = readl_poll_timeout(host->ioaddr +
3395 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003396 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3397 CORE_DDR_DLL_LOCK)), 10, 1000);
3398 if (rc == -ETIMEDOUT)
3399 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3400 mmc_hostname(host->mmc),
3401 dll_lock);
3402 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003403 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003404 if (!msm_host->use_cdclp533)
3405 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3406 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303407 msm_host_offset->CORE_VENDOR_SPEC3)
3408 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3409 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003410
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003411 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303412 writel_relaxed(((readl_relaxed(host->ioaddr +
3413 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003414 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303415 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3416 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003417
3418 /*
3419 * Disable HC_SELECT_IN to be able to use the UHS mode select
3420 * configuration from Host Control2 register for all other
3421 * modes.
3422 *
3423 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3424 * in VENDOR_SPEC_FUNC
3425 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303426 writel_relaxed((readl_relaxed(host->ioaddr +
3427 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003428 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303429 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3430 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003431 }
3432 mb();
3433
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303434 if (sup_clock != msm_host->clk_rate) {
3435 pr_debug("%s: %s: setting clk rate to %u\n",
3436 mmc_hostname(host->mmc), __func__, sup_clock);
3437 rc = clk_set_rate(msm_host->clk, sup_clock);
3438 if (rc) {
3439 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3440 mmc_hostname(host->mmc), __func__,
3441 sup_clock, rc);
3442 goto out;
3443 }
3444 msm_host->clk_rate = sup_clock;
3445 host->clock = clock;
Can Guob903ad82017-10-17 13:22:53 +08003446
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303447 if (!IS_ERR(msm_host->bus_aggr_clk) &&
3448 msm_host->pdata->bus_clk_cnt) {
3449 bus_clk_rate = sdhci_msm_get_bus_aggr_clk_rate(host,
3450 sup_clock);
3451 if (bus_clk_rate >= 0) {
3452 rc = clk_set_rate(msm_host->bus_aggr_clk,
3453 bus_clk_rate);
3454 if (rc) {
3455 pr_err("%s: %s: Failed to set rate %ld for bus-aggr-clk : %d\n",
3456 mmc_hostname(host->mmc),
3457 __func__, bus_clk_rate, rc);
3458 goto out;
3459 }
3460 } else {
3461 pr_err("%s: %s: Unsupported apps clk rate %u for bus-aggr-clk, err: %ld\n",
3462 mmc_hostname(host->mmc), __func__,
3463 sup_clock, bus_clk_rate);
3464 }
3465 }
3466
Can Guob903ad82017-10-17 13:22:53 +08003467 /* Configure pinctrl drive type according to
3468 * current clock rate
3469 */
3470 rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
3471 if (rc)
3472 pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
3473 mmc_hostname(host->mmc), __func__,
3474 clock, rc);
3475
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303476 /*
3477 * Update the bus vote in case of frequency change due to
3478 * clock scaling.
3479 */
3480 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303481 }
3482out:
3483 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303484}
3485
Sahitya Tummala14613432013-03-21 11:13:25 +05303486static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3487 unsigned int uhs)
3488{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003489 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3490 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303491 const struct sdhci_msm_offset *msm_host_offset =
3492 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303493 u16 ctrl_2;
3494
3495 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3496 /* Select Bus Speed Mode for host */
3497 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003498 if ((uhs == MMC_TIMING_MMC_HS400) ||
3499 (uhs == MMC_TIMING_MMC_HS200) ||
3500 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303501 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3502 else if (uhs == MMC_TIMING_UHS_SDR12)
3503 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3504 else if (uhs == MMC_TIMING_UHS_SDR25)
3505 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3506 else if (uhs == MMC_TIMING_UHS_SDR50)
3507 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003508 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3509 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303510 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303511 /*
3512	 * When the clock frequency is less than 100MHz, the feedback clock must be
3513 * provided and DLL must not be used so that tuning can be skipped. To
3514 * provide feedback clock, the mode selection can be any value less
3515 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3516 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003517 if (host->clock <= CORE_FREQ_100MHZ) {
3518 if ((uhs == MMC_TIMING_MMC_HS400) ||
3519 (uhs == MMC_TIMING_MMC_HS200) ||
3520 (uhs == MMC_TIMING_UHS_SDR104))
3521 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303522
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003523 /*
3524 * Make sure DLL is disabled when not required
3525 *
3526 * Write 1 to DLL_RST bit of DLL_CONFIG register
3527 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303528 writel_relaxed((readl_relaxed(host->ioaddr +
3529 msm_host_offset->CORE_DLL_CONFIG)
3530 | CORE_DLL_RST), host->ioaddr +
3531 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003532
3533 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303534 writel_relaxed((readl_relaxed(host->ioaddr +
3535 msm_host_offset->CORE_DLL_CONFIG)
3536 | CORE_DLL_PDN), host->ioaddr +
3537 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003538 mb();
3539
3540 /*
3541 * The DLL needs to be restored and CDCLP533 recalibrated
3542 * when the clock frequency is set back to 400MHz.
3543 */
3544 msm_host->calibration_done = false;
3545 }
3546
3547 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3548 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303549 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3550
3551}
3552
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003553#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003554#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303555static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003556{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303557 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303558 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3559 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303560 const struct sdhci_msm_offset *msm_host_offset =
3561 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303562 struct cmdq_host *cq_host = host->cq_host;
3563
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303564 u32 version = sdhci_msm_readl_relaxed(host,
3565 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003566 u16 minor = version & CORE_VERSION_TARGET_MASK;
3567 /* registers offset changed starting from 4.2.0 */
3568 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3569
Sayali Lokhande6e7e6d52017-01-04 12:00:35 +05303570 if (cq_host->offset_changed)
3571 offset += CQ_V5_VENDOR_CFG;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003572 pr_err("---- Debug RAM dump ----\n");
3573 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3574 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3575 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3576
3577 while (i < 16) {
3578 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3579 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3580 i++;
3581 }
3582 pr_err("-------------------------\n");
3583}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303584
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303585static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3586{
3587 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3588 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3589 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3590
3591 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3592 sizeof(struct mmc_host));
3593 if (msm_host->mmc->card)
3594 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3595 sizeof(struct mmc_card));
3596 memcpy(&cached_data->copy_host, host,
3597 sizeof(struct sdhci_host));
3598}
3599
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303600void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3601{
3602 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3603 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303604 const struct sdhci_msm_offset *msm_host_offset =
3605 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303606 int tbsel, tbsel2;
3607 int i, index = 0;
3608 u32 test_bus_val = 0;
3609 u32 debug_reg[MAX_TEST_BUS] = {0};
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303610 u32 sts = 0;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303611
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303612 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303613 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003614 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303615 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003616
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303617 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3618 sdhci_msm_readl_relaxed(host,
3619 msm_host_offset->CORE_MCI_DATA_CNT),
3620 sdhci_msm_readl_relaxed(host,
3621 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303622 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303623 sdhci_msm_readl_relaxed(host,
3624 msm_host_offset->CORE_MCI_DATA_CNT),
3625 sdhci_msm_readl_relaxed(host,
3626 msm_host_offset->CORE_MCI_FIFO_CNT),
3627 sdhci_msm_readl_relaxed(host,
3628 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303629 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303630 readl_relaxed(host->ioaddr +
3631 msm_host_offset->CORE_DLL_CONFIG),
3632 readl_relaxed(host->ioaddr +
3633 msm_host_offset->CORE_DLL_STATUS),
3634 sdhci_msm_readl_relaxed(host,
3635 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303636 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303637 readl_relaxed(host->ioaddr +
3638 msm_host_offset->CORE_VENDOR_SPEC),
3639 readl_relaxed(host->ioaddr +
3640 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3641 readl_relaxed(host->ioaddr +
3642 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303643 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303644 readl_relaxed(host->ioaddr +
3645 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303646
3647 /*
3648 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3649 * of CORE_TESTBUS_CONFIG register.
3650 *
3651 * To select test bus 0 to 7 use tbsel and to select any test bus
3652 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
3653	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3654 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3655 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003656 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303657 for (tbsel = 0; tbsel < 8; tbsel++) {
3658 if (index >= MAX_TEST_BUS)
3659 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303660 test_bus_val =
3661 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3662 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3663 sdhci_msm_writel_relaxed(test_bus_val, host,
3664 msm_host_offset->CORE_TESTBUS_CONFIG);
3665 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3666 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303667 }
3668 }
3669 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3670 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3671 i, i + 3, debug_reg[i], debug_reg[i+1],
3672 debug_reg[i+2], debug_reg[i+3]);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303673 if (host->is_crypto_en) {
3674 sdhci_msm_ice_get_status(host, &sts);
3675 pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
Venkat Gopalakrishnan6324ee62015-10-22 17:53:30 -07003676 sdhci_msm_ice_print_regs(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303677 }
3678}
3679
3680static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3681{
3682 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3683 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3684
3685 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303686 if (msm_host->ice.pdev) {
3687 if (msm_host->ice_hci_support)
3688 writel_relaxed(1, host->ioaddr +
3689 HC_VENDOR_SPECIFIC_ICE_CTRL);
3690 else
3691 writel_relaxed(1,
3692 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3693 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303694
3695 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003696}
3697
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303698/*
3699 * sdhci_msm_enhanced_strobe_mask :-
3700 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3701 * SW should write 3 to
3702 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3703 * The default reset value of this register is 2.
3704 */
3705static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3706{
3707 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3708 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303709 const struct sdhci_msm_offset *msm_host_offset =
3710 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303711
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303712 if (!msm_host->enhanced_strobe ||
3713 !mmc_card_strobe(msm_host->mmc->card)) {
3714 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303715 mmc_hostname(host->mmc));
3716 return;
3717 }
3718
3719 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303720 writel_relaxed((readl_relaxed(host->ioaddr +
3721 msm_host_offset->CORE_VENDOR_SPEC3)
3722 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3723 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303724 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303725 writel_relaxed((readl_relaxed(host->ioaddr +
3726 msm_host_offset->CORE_VENDOR_SPEC3)
3727 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3728 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303729 }
3730}
3731
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003732static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3733{
3734 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3735 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303736 const struct sdhci_msm_offset *msm_host_offset =
3737 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003738
3739 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303740 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3741 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003742 } else {
3743 u32 value;
3744
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303745 value = sdhci_msm_readl_relaxed(host,
3746 msm_host_offset->CORE_TESTBUS_CONFIG);
3747 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3748 sdhci_msm_writel_relaxed(value, host,
3749 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003750 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303751}
3752
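/*
 * When enabling, request a software reset via HC_SW_RST_REQ and poll for up
 * to ~100ms for the controller to complete it; on timeout, set
 * HC_SW_RST_WAIT_IDLE_DIS so the reset does not wait for pending AXI
 * transfers. When disabling, clear the workaround bit again.
 */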
Pavan Anamula691dd592015-08-25 16:11:20 +05303753void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3754{
3755 u32 vendor_func2;
3756 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303757 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3758 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3759 const struct sdhci_msm_offset *msm_host_offset =
3760 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303761
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303762 vendor_func2 = readl_relaxed(host->ioaddr +
3763 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303764
3765 if (enable) {
3766 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303767 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303768 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303769 while (readl_relaxed(host->ioaddr +
3770 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303771 if (timeout == 0) {
3772 pr_info("%s: Applying wait idle disable workaround\n",
3773 mmc_hostname(host->mmc));
3774 /*
3775 * Apply the reset workaround to not wait for
3776 * pending data transfers on AXI before
3777 * resetting the controller. This could be
3778 * risky if the transfers were stuck on the
3779 * AXI bus.
3780 */
3781 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303782 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303783 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303784 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3785 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303786 host->reset_wa_t = ktime_get();
3787 return;
3788 }
3789 timeout--;
3790 udelay(10);
3791 }
3792 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3793 mmc_hostname(host->mmc));
3794 } else {
3795 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303796 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303797 }
3798}
3799
Gilad Broner44445992015-09-29 16:05:39 +03003800static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3801{
3802 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303803 container_of(work, struct sdhci_msm_pm_qos_irq,
3804 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003805
3806 if (atomic_read(&pm_qos_irq->counter))
3807 return;
3808
3809 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3810 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3811}
3812
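/*
 * Each in-flight request votes for a low CPU DMA latency; a per-host
 * counter tracks outstanding votes and the PM QoS request is relaxed back
 * to PM_QOS_DEFAULT_VALUE only once the counter drops to zero (immediately,
 * or via delayed work when the unvote is asynchronous).
 */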
3813void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3814{
3815 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3816 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3817 struct sdhci_msm_pm_qos_latency *latency =
3818 &msm_host->pdata->pm_qos_data.irq_latency;
3819 int counter;
3820
3821 if (!msm_host->pm_qos_irq.enabled)
3822 return;
3823
3824 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3825 /* Make sure to update the voting in case power policy has changed */
3826 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3827 && counter > 1)
3828 return;
3829
Asutosh Das36c2e922015-12-01 12:19:58 +05303830 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003831 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3832 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3833 msm_host->pm_qos_irq.latency);
3834}
3835
3836void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3837{
3838 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3839 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3840 int counter;
3841
3842 if (!msm_host->pm_qos_irq.enabled)
3843 return;
3844
Subhash Jadavani4d813902015-10-15 12:16:43 -07003845 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3846 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3847 } else {
3848 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3849 return;
Gilad Broner44445992015-09-29 16:05:39 +03003850 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003851
Gilad Broner44445992015-09-29 16:05:39 +03003852 if (counter)
3853 return;
3854
3855 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303856 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3857 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003858 return;
3859 }
3860
3861 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3862 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3863 msm_host->pm_qos_irq.latency);
3864}
3865
Gilad Broner68c54562015-09-20 11:59:46 +03003866static ssize_t
3867sdhci_msm_pm_qos_irq_show(struct device *dev,
3868 struct device_attribute *attr, char *buf)
3869{
3870 struct sdhci_host *host = dev_get_drvdata(dev);
3871 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3872 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3873 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3874
3875 return snprintf(buf, PAGE_SIZE,
3876 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3877 irq->enabled, atomic_read(&irq->counter), irq->latency);
3878}
3879
3880static ssize_t
3881sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3882 struct device_attribute *attr, char *buf)
3883{
3884 struct sdhci_host *host = dev_get_drvdata(dev);
3885 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3886 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3887
3888 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3889}
3890
3891static ssize_t
3892sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3893 struct device_attribute *attr, const char *buf, size_t count)
3894{
3895 struct sdhci_host *host = dev_get_drvdata(dev);
3896 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3897 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3898 uint32_t value;
3899 bool enable;
3900 int ret;
3901
3902 ret = kstrtou32(buf, 0, &value);
3903 if (ret)
3904 goto out;
3905 enable = !!value;
3906
3907 if (enable == msm_host->pm_qos_irq.enabled)
3908 goto out;
3909
3910 msm_host->pm_qos_irq.enabled = enable;
3911 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303912 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003913 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3914 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3915 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3916 msm_host->pm_qos_irq.latency);
3917 }
3918
3919out:
3920 return count;
3921}
3922
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003923#ifdef CONFIG_SMP
3924static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3925 struct sdhci_host *host)
3926{
3927 msm_host->pm_qos_irq.req.irq = host->irq;
3928}
3929#else
3930static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3931 struct sdhci_host *host) { }
3932#endif
3933
Gilad Broner44445992015-09-29 16:05:39 +03003934void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3935{
3936 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3937 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3938 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003939 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003940
3941 if (!msm_host->pdata->pm_qos_data.irq_valid)
3942 return;
3943
3944 /* Initialize only once as this gets called per partition */
3945 if (msm_host->pm_qos_irq.enabled)
3946 return;
3947
3948 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3949 msm_host->pm_qos_irq.req.type =
3950 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003951 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3952 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3953 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003954 else
3955 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3956 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3957
Asutosh Das36c2e922015-12-01 12:19:58 +05303958 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003959 sdhci_msm_pm_qos_irq_unvote_work);
3960 /* For initialization phase, set the performance latency */
3961 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3962 msm_host->pm_qos_irq.latency =
3963 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3964 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3965 msm_host->pm_qos_irq.latency);
3966 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003967
3968 /* sysfs */
3969 msm_host->pm_qos_irq.enable_attr.show =
3970 sdhci_msm_pm_qos_irq_enable_show;
3971 msm_host->pm_qos_irq.enable_attr.store =
3972 sdhci_msm_pm_qos_irq_enable_store;
3973 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3974 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3975 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3976 ret = device_create_file(&msm_host->pdev->dev,
3977 &msm_host->pm_qos_irq.enable_attr);
3978 if (ret)
3979 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3980 __func__, ret);
3981
3982 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3983 msm_host->pm_qos_irq.status_attr.store = NULL;
3984 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3985 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3986 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3987 ret = device_create_file(&msm_host->pdev->dev,
3988 &msm_host->pm_qos_irq.status_attr);
3989 if (ret)
3990 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3991 __func__, ret);
3992}
3993
3994static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3995 struct device_attribute *attr, char *buf)
3996{
3997 struct sdhci_host *host = dev_get_drvdata(dev);
3998 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3999 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4000 struct sdhci_msm_pm_qos_group *group;
4001 int i;
4002 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4003 int offset = 0;
4004
4005 for (i = 0; i < nr_groups; i++) {
4006 group = &msm_host->pm_qos[i];
4007 offset += snprintf(&buf[offset], PAGE_SIZE,
4008 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
4009 i, group->req.cpus_affine.bits[0],
4010 msm_host->pm_qos_group_enable,
4011 atomic_read(&group->counter),
4012 group->latency);
4013 }
4014
4015 return offset;
4016}
4017
4018static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
4019 struct device_attribute *attr, char *buf)
4020{
4021 struct sdhci_host *host = dev_get_drvdata(dev);
4022 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4023 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4024
4025 return snprintf(buf, PAGE_SIZE, "%s\n",
4026 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
4027}
4028
4029static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
4030 struct device_attribute *attr, const char *buf, size_t count)
4031{
4032 struct sdhci_host *host = dev_get_drvdata(dev);
4033 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4034 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4035 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4036 uint32_t value;
4037 bool enable;
4038 int ret;
4039 int i;
4040
4041 ret = kstrtou32(buf, 0, &value);
4042 if (ret)
4043 goto out;
4044 enable = !!value;
4045
4046 if (enable == msm_host->pm_qos_group_enable)
4047 goto out;
4048
4049 msm_host->pm_qos_group_enable = enable;
4050 if (!enable) {
4051 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304052 cancel_delayed_work_sync(
4053 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004054 atomic_set(&msm_host->pm_qos[i].counter, 0);
4055 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
4056 pm_qos_update_request(&msm_host->pm_qos[i].req,
4057 msm_host->pm_qos[i].latency);
4058 }
4059 }
4060
4061out:
4062 return count;
Gilad Broner44445992015-09-29 16:05:39 +03004063}
4064
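/*
 * Map a CPU number to its PM QoS group index using cpu_group_map from the
 * platform data; returns -EINVAL when the CPU is not in any group.
 */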
4065static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
4066{
4067 int i;
4068 struct sdhci_msm_cpu_group_map *map =
4069 &msm_host->pdata->pm_qos_data.cpu_group_map;
4070
4071 if (cpu < 0)
4072 goto not_found;
4073
4074 for (i = 0; i < map->nr_groups; i++)
4075 if (cpumask_test_cpu(cpu, &map->mask[i]))
4076 return i;
4077
4078not_found:
4079 return -EINVAL;
4080}
4081
4082void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
4083 struct sdhci_msm_pm_qos_latency *latency, int cpu)
4084{
4085 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4086 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4087 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4088 struct sdhci_msm_pm_qos_group *pm_qos_group;
4089 int counter;
4090
4091 if (!msm_host->pm_qos_group_enable || group < 0)
4092 return;
4093
4094 pm_qos_group = &msm_host->pm_qos[group];
4095 counter = atomic_inc_return(&pm_qos_group->counter);
4096
4097 /* Make sure to update the voting in case power policy has changed */
4098 if (pm_qos_group->latency == latency->latency[host->power_policy]
4099 && counter > 1)
4100 return;
4101
Asutosh Das36c2e922015-12-01 12:19:58 +05304102 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03004103
4104 pm_qos_group->latency = latency->latency[host->power_policy];
4105 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
4106}
4107
4108static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
4109{
4110 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05304111 container_of(work, struct sdhci_msm_pm_qos_group,
4112 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03004113
4114 if (atomic_read(&group->counter))
4115 return;
4116
4117 group->latency = PM_QOS_DEFAULT_VALUE;
4118 pm_qos_update_request(&group->req, group->latency);
4119}
4120
Gilad Broner07d92eb2015-09-29 16:57:21 +03004121bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03004122{
4123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4124 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4125 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4126
4127 if (!msm_host->pm_qos_group_enable || group < 0 ||
4128 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03004129 return false;
Gilad Broner44445992015-09-29 16:05:39 +03004130
4131 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304132 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
4133 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03004134 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004135 }
4136
4137 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
4138 pm_qos_update_request(&msm_host->pm_qos[group].req,
4139 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03004140 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004141}
4142
4143void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
4144 struct sdhci_msm_pm_qos_latency *latency)
4145{
4146 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4147 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4148 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4149 struct sdhci_msm_pm_qos_group *group;
4150 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03004151 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004152
4153 if (msm_host->pm_qos_group_enable)
4154 return;
4155
4156 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
4157 GFP_KERNEL);
4158 if (!msm_host->pm_qos)
4159 return;
4160
4161 for (i = 0; i < nr_groups; i++) {
4162 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05304163 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004164 sdhci_msm_pm_qos_cpu_unvote_work);
4165 atomic_set(&group->counter, 0);
4166 group->req.type = PM_QOS_REQ_AFFINE_CORES;
4167 cpumask_copy(&group->req.cpus_affine,
4168 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
Ritesh Harjanib41e0572017-03-28 13:19:26 +05304169 /* We set default latency here for all pm_qos cpu groups. */
4170 group->latency = PM_QOS_DEFAULT_VALUE;
Gilad Broner44445992015-09-29 16:05:39 +03004171 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
4172 group->latency);
4173 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
4174 __func__, i,
4175 group->req.cpus_affine.bits[0],
4176 group->latency,
4177 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
4178 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03004179 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03004180 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004181
4182 /* sysfs */
4183 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
4184 msm_host->pm_qos_group_status_attr.store = NULL;
4185 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
4186 msm_host->pm_qos_group_status_attr.attr.name =
4187 "pm_qos_cpu_groups_status";
4188 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
4189 ret = device_create_file(&msm_host->pdev->dev,
4190 &msm_host->pm_qos_group_status_attr);
4191 if (ret)
4192 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
4193 __func__, ret);
4194 msm_host->pm_qos_group_enable_attr.show =
4195 sdhci_msm_pm_qos_group_enable_show;
4196 msm_host->pm_qos_group_enable_attr.store =
4197 sdhci_msm_pm_qos_group_enable_store;
4198 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
4199 msm_host->pm_qos_group_enable_attr.attr.name =
4200 "pm_qos_cpu_groups_enable";
4201 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
4202 ret = device_create_file(&msm_host->pdev->dev,
4203 &msm_host->pm_qos_group_enable_attr);
4204 if (ret)
4205 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
4206 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03004207}
4208
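/*
 * Called before issuing a request: vote for the IRQ PM QoS and for the
 * CPU group of the CPU issuing the request. If the request comes from a
 * different group than the previous one, the old group's vote is
 * dropped first.
 */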
Gilad Broner07d92eb2015-09-29 16:57:21 +03004209static void sdhci_msm_pre_req(struct sdhci_host *host,
4210 struct mmc_request *mmc_req)
4211{
4212 int cpu;
4213 int group;
4214 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4215 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4216 int prev_group = sdhci_msm_get_cpu_group(msm_host,
4217 msm_host->pm_qos_prev_cpu);
4218
4219 sdhci_msm_pm_qos_irq_vote(host);
4220
4221 cpu = get_cpu();
4222 put_cpu();
4223 group = sdhci_msm_get_cpu_group(msm_host, cpu);
4224 if (group < 0)
4225 return;
4226
4227 if (group != prev_group && prev_group >= 0) {
4228 sdhci_msm_pm_qos_cpu_unvote(host,
4229 msm_host->pm_qos_prev_cpu, false);
4230 prev_group = -1; /* make sure to vote for new group */
4231 }
4232
4233 if (prev_group < 0) {
4234 sdhci_msm_pm_qos_cpu_vote(host,
4235 msm_host->pdata->pm_qos_data.latency, cpu);
4236 msm_host->pm_qos_prev_cpu = cpu;
4237 }
4238}
4239
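/*
 * Called after a request completes: release the IRQ PM QoS vote and the
 * CPU group vote taken in sdhci_msm_pre_req().
 */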
4240static void sdhci_msm_post_req(struct sdhci_host *host,
4241 struct mmc_request *mmc_req)
4242{
4243 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4244 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4245
4246 sdhci_msm_pm_qos_irq_unvote(host, false);
4247
4248 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4249 msm_host->pm_qos_prev_cpu = -1;
4250}
4251
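/*
 * Host init callback: set up IRQ PM QoS and, when valid latency data is
 * provided in platform data (legacy_valid), the per-CPU-group PM QoS
 * voting as well.
 */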
4252static void sdhci_msm_init(struct sdhci_host *host)
4253{
4254 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4255 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4256
4257 sdhci_msm_pm_qos_irq_init(host);
4258
4259 if (msm_host->pdata->pm_qos_data.legacy_valid)
4260 sdhci_msm_pm_qos_cpu_init(host,
4261 msm_host->pdata->pm_qos_data.latency);
4262}
4263
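/*
 * Report the maximum current (in uA) that the slot's VDD supply can
 * provide, taken from the regulator's high power mode (hpm_uA) value.
 */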
Sahitya Tummala9150a942014-10-31 15:33:04 +05304264static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4265{
4266 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4267 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4268 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4269 u32 max_curr = 0;
4270
4271 if (curr_slot && curr_slot->vdd_data)
4272 max_curr = curr_slot->vdd_data->hpm_uA;
4273
4274 return max_curr;
4275}
4276
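/*
 * Scale the ICE core clock with the host load: use the minimum rate from
 * platform data for MMC_LOAD_LOW and the maximum rate otherwise. Does
 * nothing if no ICE clock is present.
 */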
Sahitya Tummala073ca552015-08-06 13:59:37 +05304277static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
4278{
4279 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4280 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4281 int ret = 0;
4282 u32 clk_rate = 0;
4283
4284 if (!IS_ERR(msm_host->ice_clk)) {
4285 clk_rate = (state == MMC_LOAD_LOW) ?
4286 msm_host->pdata->ice_clk_min :
4287 msm_host->pdata->ice_clk_max;
4288 if (msm_host->ice_clk_rate == clk_rate)
4289 return 0;
4290 pr_debug("%s: changing ICE clk rate to %u\n",
4291 mmc_hostname(host->mmc), clk_rate);
4292 ret = clk_set_rate(msm_host->ice_clk, clk_rate);
4293 if (ret) {
4294 pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
4295 mmc_hostname(host->mmc), ret, clk_rate);
4296 return ret;
4297 }
4298 msm_host->ice_clk_rate = clk_rate;
4299 }
4300 return 0;
4301}
4302
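/* Platform-specific callbacks plugged into the sdhci core. */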
Asutosh Das0ef24812012-12-18 16:14:02 +05304303static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304304 .crypto_engine_cfg = sdhci_msm_ice_cfg,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304305 .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
Veerabhadrarao Badiganti6c6b97a2017-03-08 06:51:49 +05304306 .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304307 .crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304308 .crypto_engine_reset = sdhci_msm_ice_reset,
Sahitya Tummala14613432013-03-21 11:13:25 +05304309 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05304310 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004311 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05304312 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004313 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05304314 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304315 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304316 .get_min_clock = sdhci_msm_get_min_clock,
4317 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05304318 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304319 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304320 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08004321 .set_bus_width = sdhci_set_bus_width,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304322 .reset = sdhci_msm_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004323 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05304324 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05304325 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03004326 .init = sdhci_msm_init,
4327 .pre_req = sdhci_msm_pre_req,
4328 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05304329 .get_current_limit = sdhci_msm_get_current_limit,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304330 .notify_load = sdhci_msm_notify_load,
Asutosh Das0ef24812012-12-18 16:14:02 +05304331};
4332
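/*
 * Adjust the advertised host capabilities and enable version-specific
 * workarounds based on the SDCC core major/minor revision read from
 * CORE_MCI_VERSION, then write the result to the vendor capabilities
 * register and cache it in msm_host->caps_0.
 */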
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304333static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
4334 struct sdhci_host *host)
4335{
Krishna Konda46fd1432014-10-30 21:13:27 -07004336 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304337 u16 minor;
4338 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304339 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304340 const struct sdhci_msm_offset *msm_host_offset =
4341 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304342
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304343 version = sdhci_msm_readl_relaxed(host,
4344 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304345 major = (version & CORE_VERSION_MAJOR_MASK) >>
4346 CORE_VERSION_MAJOR_SHIFT;
4347 minor = version & CORE_VERSION_TARGET_MASK;
4348
Krishna Konda46fd1432014-10-30 21:13:27 -07004349 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
4350
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304351 /*
4352	 * Starting with the SDCC 5 controller (core major version = 1),
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004353	 * the controller won't advertise 3.0V, 1.8V and 8-bit features
4354	 * except on some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304355 */
4356 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004357 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004358 /*
4359 * Enable 1.8V support capability on controllers that
4360 * support dual voltage
4361 */
4362 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07004363 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
4364 caps |= CORE_3_0V_SUPPORT;
4365 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004366 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05304367 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
4368 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304369 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004370
4371 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304372 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
4373	 * on 8992 (minor 0x3e) as a workaround for the data stuck issue on reset.
4374 */
4375 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304376 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304377 val = readl_relaxed(host->ioaddr +
4378 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304379 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304380 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304381 }
4382 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004383 * SDCC 5 controller with major version 1, minor version 0x34 and later
4384 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
4385 */
4386 if ((major == 1) && (minor < 0x34))
4387 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004388
4389 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004390 * SDCC 5 controller with major version 1, minor version 0x42 and later
4391 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304392 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004393 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304394 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004395 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304396 msm_host->enhanced_strobe = true;
4397 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004398
4399 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004400 * SDCC 5 controller with major version 1 and minor version 0x42,
4401 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
4402 * gating cannot guarantee MCLK timing requirement i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304403 * when MCLK is gated OFF, it is not gated for less than 0.5us
4404	 * and MCLK must be switched on for at least 1us before DATA
4405 * starts coming.
4406 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004407 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
Veerabhadrarao Badiganti06d2c8c2017-09-12 17:24:09 +05304408 (minor == 0x49) || (minor >= 0x6b)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304409 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004410
Pavan Anamula5a256df2015-10-16 14:38:28 +05304411	/* Fake 3.0V support for SDIO devices which require such voltage */
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05304412 if (msm_host->core_3_0v_support) {
Pavan Anamula5a256df2015-10-16 14:38:28 +05304413 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304414 writel_relaxed((readl_relaxed(host->ioaddr +
4415 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4416 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304417 }
4418
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004419 if ((major == 1) && (minor >= 0x49))
4420 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304421 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004422 * Mask 64-bit support for controller with 32-bit address bus so that
4423	 * a smaller descriptor size will be used, reducing memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004424 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004425 if (!msm_host->pdata->largeaddressbus)
4426 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4427
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304428 writel_relaxed(caps, host->ioaddr +
4429 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004430 /* keep track of the value in SDHCI_CAPABILITIES */
4431 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304432
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304433 if ((major == 1) && (minor >= 0x6b)) {
Ritesh Harjani82124772014-11-04 15:34:00 +05304434 msm_host->ice_hci_support = true;
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304435 host->cdr_support = true;
4436 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304437}
4438
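/*
 * Initialize the command queue (CMDQ) engine when CONFIG_MMC_CQ_HCI is
 * enabled and CMDQ has not been disabled on the kernel command line.
 */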
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004439#ifdef CONFIG_MMC_CQ_HCI
4440static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4441 struct platform_device *pdev)
4442{
4443 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4444 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4445
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304446 if (nocmdq) {
4447 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4448 return;
4449 }
4450
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004451 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004452 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004453 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4454 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004455 host->cq_host = NULL;
4456 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004457 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004458 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004459}
4460#else
4461static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4462 struct platform_device *pdev)
4463{
4464
4465}
4466#endif
4467
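/*
 * Return true if this controller is the boot device, as indicated by the
 * "androidboot.bootdevice=" kernel command line argument. If the
 * argument is absent, assume it is the boot device.
 */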
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004468static bool sdhci_msm_is_bootdevice(struct device *dev)
4469{
4470 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4471 strlen(saved_command_line))) {
4472 char search_string[50];
4473
4474 snprintf(search_string, ARRAY_SIZE(search_string),
4475 "androidboot.bootdevice=%s", dev_name(dev));
4476 if (strnstr(saved_command_line, search_string,
4477 strlen(saved_command_line)))
4478 return true;
4479 else
4480 return false;
4481 }
4482
4483 /*
4484 * "androidboot.bootdevice=" argument is not present then
4485 * return true as we don't know the boot device anyways.
4486 */
4487 return true;
4488}
4489
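/*
 * Probe: parse platform data, set up clocks, bus voting and regulators,
 * reset the controller into SDHC mode, configure capabilities and
 * quirks, request the power and (optional) SDIO wakeup interrupts,
 * initialize ICE and CMDQ when present, and finally register the host
 * with the sdhci core along with its sysfs attributes.
 */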
Asutosh Das0ef24812012-12-18 16:14:02 +05304490static int sdhci_msm_probe(struct platform_device *pdev)
4491{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304492 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304493 struct sdhci_host *host;
4494 struct sdhci_pltfm_host *pltfm_host;
4495 struct sdhci_msm_host *msm_host;
4496 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004497 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004498 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004499 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304500 struct resource *tlmm_memres = NULL;
4501 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304502 unsigned long flags;
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004503 bool force_probe;
Asutosh Das0ef24812012-12-18 16:14:02 +05304504
4505 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4506 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4507 GFP_KERNEL);
4508 if (!msm_host) {
4509 ret = -ENOMEM;
4510 goto out;
4511 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304512
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304513 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4514 msm_host->mci_removed = true;
4515 msm_host->offset = &sdhci_msm_offset_mci_removed;
4516 } else {
4517 msm_host->mci_removed = false;
4518 msm_host->offset = &sdhci_msm_offset_mci_present;
4519 }
4520 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304521 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4522 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4523 if (IS_ERR(host)) {
4524 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304525 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304526 }
4527
4528 pltfm_host = sdhci_priv(host);
4529 pltfm_host->priv = msm_host;
4530 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304531 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304532
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304533 /* get the ice device vops if present */
4534 ret = sdhci_msm_ice_get_dev(host);
4535 if (ret == -EPROBE_DEFER) {
4536 /*
4537	 * The SDHCI driver might be probed before the ICE driver is.
4538	 * In that case we would like to return the EPROBE_DEFER code
4539	 * in order to delay its probing.
4540 */
4541 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4542 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004543 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304544
4545 } else if (ret == -ENODEV) {
4546 /*
4547 * ICE device is not enabled in DTS file. No need for further
4548 * initialization of ICE driver.
4549 */
4550 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4551 __func__);
4552 } else if (ret) {
4553 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4554 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004555 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304556 }
4557
Asutosh Das0ef24812012-12-18 16:14:02 +05304558 /* Extract platform data */
4559 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004560 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304561 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004562 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4563 ret);
4564 goto pltfm_free;
4565 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004566
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004567 /* Read property to determine if the probe is forced */
4568 force_probe = of_find_property(pdev->dev.of_node,
4569 "qcom,force-sdhc1-probe", NULL);
4570
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004571 /* skip the probe if eMMC isn't a boot device */
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004572 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
4573 && !force_probe) {
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004574 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004575 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004576 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004577
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004578 if (disable_slots & (1 << (ret - 1))) {
4579 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4580 ret);
4581 ret = -ENODEV;
4582 goto pltfm_free;
4583 }
4584
Sayali Lokhande5f768322016-04-11 18:36:53 +05304585 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004586 sdhci_slot[ret-1] = msm_host;
4587
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004588 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4589 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304590 if (!msm_host->pdata) {
4591 dev_err(&pdev->dev, "DT parsing error\n");
4592 goto pltfm_free;
4593 }
4594 } else {
4595 dev_err(&pdev->dev, "No device tree node\n");
4596 goto pltfm_free;
4597 }
4598
4599 /* Setup Clocks */
4600
4601 /* Setup SDCC bus voter clock. */
4602 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4603 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4604 /* Vote for max. clk rate for max. performance */
4605 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4606 if (ret)
4607 goto pltfm_free;
4608 ret = clk_prepare_enable(msm_host->bus_clk);
4609 if (ret)
4610 goto pltfm_free;
4611 }
4612
4613 /* Setup main peripheral bus clock */
4614 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4615 if (!IS_ERR(msm_host->pclk)) {
4616 ret = clk_prepare_enable(msm_host->pclk);
4617 if (ret)
4618 goto bus_clk_disable;
4619 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304620 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304621
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304622 /* Setup SDC ufs bus aggr clock */
4623 msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
4624 if (!IS_ERR(msm_host->bus_aggr_clk)) {
4625 ret = clk_prepare_enable(msm_host->bus_aggr_clk);
4626 if (ret) {
4627 dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
4628 goto pclk_disable;
4629 }
4630 }
4631
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304632 if (msm_host->ice.pdev) {
4633 /* Setup SDC ICE clock */
4634 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4635 if (!IS_ERR(msm_host->ice_clk)) {
4636 /* ICE core has only one clock frequency for now */
4637 ret = clk_set_rate(msm_host->ice_clk,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304638 msm_host->pdata->ice_clk_max);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304639 if (ret) {
4640 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4641 ret,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304642 msm_host->pdata->ice_clk_max);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304643 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304644 }
4645 ret = clk_prepare_enable(msm_host->ice_clk);
4646 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304647 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304648
4649 msm_host->ice_clk_rate =
Sahitya Tummala073ca552015-08-06 13:59:37 +05304650 msm_host->pdata->ice_clk_max;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304651 }
4652 }
4653
Asutosh Das0ef24812012-12-18 16:14:02 +05304654 /* Setup SDC MMC clock */
4655 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4656 if (IS_ERR(msm_host->clk)) {
4657 ret = PTR_ERR(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304658 goto bus_aggr_clk_disable;
Asutosh Das0ef24812012-12-18 16:14:02 +05304659 }
4660
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304661 /* Set to the minimum supported clock frequency */
4662 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4663 if (ret) {
4664 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304665 goto bus_aggr_clk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304666 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304667 ret = clk_prepare_enable(msm_host->clk);
4668 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304669 goto bus_aggr_clk_disable;
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304670
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304671 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304672 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304673
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004674 /* Setup CDC calibration fixed feedback clock */
4675 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4676 if (!IS_ERR(msm_host->ff_clk)) {
4677 ret = clk_prepare_enable(msm_host->ff_clk);
4678 if (ret)
4679 goto clk_disable;
4680 }
4681
4682 /* Setup CDC calibration sleep clock */
4683 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4684 if (!IS_ERR(msm_host->sleep_clk)) {
4685 ret = clk_prepare_enable(msm_host->sleep_clk);
4686 if (ret)
4687 goto ff_clk_disable;
4688 }
4689
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004690 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4691
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304692 ret = sdhci_msm_bus_register(msm_host, pdev);
4693 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004694 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304695
4696 if (msm_host->msm_bus_vote.client_handle)
4697 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4698 sdhci_msm_bus_work);
4699 sdhci_msm_bus_voting(host, 1);
4700
Asutosh Das0ef24812012-12-18 16:14:02 +05304701 /* Setup regulators */
4702 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4703 if (ret) {
4704 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304705 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304706 }
4707
4708 /* Reset the core and Enable SDHC mode */
4709 core_memres = platform_get_resource_byname(pdev,
4710 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304711 if (!msm_host->mci_removed) {
4712 if (!core_memres) {
4713 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4714 goto vreg_deinit;
4715 }
4716 msm_host->core_mem = devm_ioremap(&pdev->dev,
4717 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304718
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304719 if (!msm_host->core_mem) {
4720 dev_err(&pdev->dev, "Failed to remap registers\n");
4721 ret = -ENOMEM;
4722 goto vreg_deinit;
4723 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304724 }
4725
Sahitya Tummala079ed852015-10-29 20:18:45 +05304726 tlmm_memres = platform_get_resource_byname(pdev,
4727 IORESOURCE_MEM, "tlmm_mem");
4728 if (tlmm_memres) {
4729 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4730 resource_size(tlmm_memres));
4731
4732 if (!tlmm_mem) {
4733 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4734 ret = -ENOMEM;
4735 goto vreg_deinit;
4736 }
4737 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4738 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4739 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4740 }
4741
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304742 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004743	 * Reset the vendor spec register to its power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304744 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004745 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304746 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304747
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +05304748 /*
4749 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
4750 */
4751 writel_relaxed((readl_relaxed(host->ioaddr +
4752 msm_host_offset->CORE_VENDOR_SPEC3) &
4753 ~CORE_FIFO_ALT_EN), host->ioaddr +
4754 msm_host_offset->CORE_VENDOR_SPEC3);
4755
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304756 if (!msm_host->mci_removed) {
4757 /* Set HC_MODE_EN bit in HC_MODE register */
4758 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304759
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304760 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4761 writel_relaxed(readl_relaxed(msm_host->core_mem +
4762 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4763 msm_host->core_mem + CORE_HC_MODE);
4764 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304765 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004766
4767 /*
4768	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4769 * be used as required later on.
4770 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304771 writel_relaxed((readl_relaxed(host->ioaddr +
4772 msm_host_offset->CORE_VENDOR_SPEC) |
4773 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4774 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304775 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304776 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4777 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4778 * interrupt in GIC (by registering the interrupt handler), we need to
4779 * ensure that any pending power irq interrupt status is acknowledged
4780 * otherwise power irq interrupt handler would be fired prematurely.
4781 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304782 irq_status = sdhci_msm_readl_relaxed(host,
4783 msm_host_offset->CORE_PWRCTL_STATUS);
4784 sdhci_msm_writel_relaxed(irq_status, host,
4785 msm_host_offset->CORE_PWRCTL_CLEAR);
4786 irq_ctl = sdhci_msm_readl_relaxed(host,
4787 msm_host_offset->CORE_PWRCTL_CTL);
4788
Subhash Jadavani28137342013-05-14 17:46:43 +05304789 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4790 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4791 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4792 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304793 sdhci_msm_writel_relaxed(irq_ctl, host,
4794 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004795
Subhash Jadavani28137342013-05-14 17:46:43 +05304796 /*
4797	 * Ensure that the above writes are propagated before interrupt enablement
4798 * in GIC.
4799 */
4800 mb();
4801
4802 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304803 * Following are the deviations from SDHC spec v3.0 -
4804 * 1. Card detection is handled using separate GPIO.
4805 * 2. Bus power control is handled by interacting with PMIC.
4806 */
4807 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4808 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304809 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004810 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304811 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304812 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304813 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304814 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304815 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304816 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304817
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304818 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4819 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4820
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004821 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004822 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4823 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4824 SDHCI_VENDOR_VER_SHIFT));
4825 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4826 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4827 /*
4828 * Add 40us delay in interrupt handler when
4829 * operating at initialization frequency(400KHz).
4830 */
4831 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4832 /*
4833 * Set Software Reset for DAT line in Software
4834 * Reset Register (Bit 2).
4835 */
4836 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4837 }
4838
Asutosh Das214b9662013-06-13 14:27:42 +05304839 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4840
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004841 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004842 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4843 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304844 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004845 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304846 goto vreg_deinit;
4847 }
Subhash Jadavanide139e82017-09-27 11:04:40 +05304848
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004849 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304850 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004851 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304852 if (ret) {
4853 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004854 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304855 goto vreg_deinit;
4856 }
4857
4858 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304859 sdhci_msm_writel_relaxed(INT_MASK, host,
4860 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304861
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304862#ifdef CONFIG_MMC_CLKGATE
4863 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4864 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4865#endif
4866
Asutosh Das0ef24812012-12-18 16:14:02 +05304867 /* Set host capabilities */
4868 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4869 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004870 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304871 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304872 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004873 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004874 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004875 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304876 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004877 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004878 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304879 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304880
4881 if (msm_host->pdata->nonremovable)
4882 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4883
Guoping Yuf7c91332014-08-20 16:56:18 +08004884 if (msm_host->pdata->nonhotplug)
4885 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4886
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07004887 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
4888
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304889 /* Initialize ICE if present */
4890 if (msm_host->ice.pdev) {
4891 ret = sdhci_msm_ice_init(host);
4892 if (ret) {
4893 dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
4894 mmc_hostname(host->mmc), ret);
4895 ret = -EINVAL;
4896 goto vreg_deinit;
4897 }
4898 host->is_crypto_en = true;
4899 /* Packed commands cannot be encrypted/decrypted using ICE */
4900 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
4901 MMC_CAP2_PACKED_WR_CONTROL);
4902 }
4903
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304904 init_completion(&msm_host->pwr_irq_completion);
4905
Sahitya Tummala581df132013-03-12 14:57:46 +05304906 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304907 /*
4908 * Set up the card detect GPIO in active configuration before
4909 * configuring it as an IRQ. Otherwise, it can be in some
4910	 * weird/inconsistent state resulting in a flood of interrupts.
4911 */
4912 sdhci_msm_setup_pins(msm_host->pdata, true);
4913
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304914 /*
4915 * This delay is needed for stabilizing the card detect GPIO
4916 * line after changing the pull configs.
4917 */
4918 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304919 ret = mmc_gpio_request_cd(msm_host->mmc,
4920 msm_host->pdata->status_gpio, 0);
4921 if (ret) {
4922 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4923 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304924 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304925 }
4926 }
4927
Krishna Konda7feab352013-09-17 23:55:40 -07004928 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4929 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4930 host->dma_mask = DMA_BIT_MASK(64);
4931 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304932 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004933 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304934 host->dma_mask = DMA_BIT_MASK(32);
4935 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304936 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304937 } else {
4938 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4939 }
4940
Ritesh Harjani42876f42015-11-17 17:46:51 +05304941 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4942 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304943 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304944 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4945 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304946 msm_host->is_sdiowakeup_enabled = true;
4947 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4948 sdhci_msm_sdiowakeup_irq,
4949 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4950 "sdhci-msm sdiowakeup", host);
4951 if (ret) {
4952 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4953 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4954 msm_host->pdata->sdiowakeup_irq = -1;
4955 msm_host->is_sdiowakeup_enabled = false;
4956 goto vreg_deinit;
4957 } else {
4958 spin_lock_irqsave(&host->lock, flags);
4959 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304960 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304961 spin_unlock_irqrestore(&host->lock, flags);
4962 }
4963 }
4964
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004965 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304966 ret = sdhci_add_host(host);
4967 if (ret) {
4968 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304969 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304970 }
4971
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05304972 msm_host->pltfm_init_done = true;
4973
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004974 pm_runtime_set_active(&pdev->dev);
4975 pm_runtime_enable(&pdev->dev);
4976 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4977 pm_runtime_use_autosuspend(&pdev->dev);
4978
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304979 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4980 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4981 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4982 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4983 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4984 ret = device_create_file(&pdev->dev,
4985 &msm_host->msm_bus_vote.max_bus_bw);
4986 if (ret)
4987 goto remove_host;
4988
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304989 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4990 msm_host->polling.show = show_polling;
4991 msm_host->polling.store = store_polling;
4992 sysfs_attr_init(&msm_host->polling.attr);
4993 msm_host->polling.attr.name = "polling";
4994 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4995 ret = device_create_file(&pdev->dev, &msm_host->polling);
4996 if (ret)
4997 goto remove_max_bus_bw_file;
4998 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304999
5000 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
5001 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
5002 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
5003 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
5004 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
5005 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5006 if (ret) {
5007 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
5008 mmc_hostname(host->mmc), __func__, ret);
5009 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5010 }
Ankit Jain1d7e5182017-09-20 11:55:38 +05305011 if (sdhci_msm_is_bootdevice(&pdev->dev))
5012 mmc_flush_detect_work(host->mmc);
5013
Asutosh Das0ef24812012-12-18 16:14:02 +05305014 /* Successful initialization */
5015 goto out;
5016
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305017remove_max_bus_bw_file:
5018 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05305019remove_host:
5020 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005021 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305022 sdhci_remove_host(host, dead);
5023vreg_deinit:
5024 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305025bus_unregister:
5026 if (msm_host->msm_bus_vote.client_handle)
5027 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5028 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07005029sleep_clk_disable:
5030 if (!IS_ERR(msm_host->sleep_clk))
5031 clk_disable_unprepare(msm_host->sleep_clk);
5032ff_clk_disable:
5033 if (!IS_ERR(msm_host->ff_clk))
5034 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305035clk_disable:
5036 if (!IS_ERR(msm_host->clk))
5037 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05305038bus_aggr_clk_disable:
5039 if (!IS_ERR(msm_host->bus_aggr_clk))
5040 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305041pclk_disable:
5042 if (!IS_ERR(msm_host->pclk))
5043 clk_disable_unprepare(msm_host->pclk);
5044bus_clk_disable:
5045 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
5046 clk_disable_unprepare(msm_host->bus_clk);
5047pltfm_free:
5048 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305049out_host_free:
5050 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305051out:
5052 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
5053 return ret;
5054}
5055
5056static int sdhci_msm_remove(struct platform_device *pdev)
5057{
5058 struct sdhci_host *host = platform_get_drvdata(pdev);
5059 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5060 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5061 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
5062 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
5063 0xffffffff);
5064
5065 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305066 if (!gpio_is_valid(msm_host->pdata->status_gpio))
5067 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305068 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005069 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305070 sdhci_remove_host(host, dead);
5071 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05305072
Asutosh Das0ef24812012-12-18 16:14:02 +05305073 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305074
Pratibhasagar V9acf2642013-11-21 21:07:21 +05305075 sdhci_msm_setup_pins(pdata, true);
5076 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305077
5078 if (msm_host->msm_bus_vote.client_handle) {
5079 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5080 sdhci_msm_bus_unregister(msm_host);
5081 }
Asutosh Das0ef24812012-12-18 16:14:02 +05305082 return 0;
5083}
5084
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005085#ifdef CONFIG_PM
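/*
 * Enable or disable the DAT1 GPIO wakeup interrupt as a system wake
 * source for SDIO cards that keep power and request SDIO IRQ wakeups.
 * Returns 1 when the configuration does not apply, 0 on success, or a
 * negative error code.
 */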
Ritesh Harjani42876f42015-11-17 17:46:51 +05305086static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
5087{
5088 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5089 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5090 unsigned long flags;
5091 int ret = 0;
5092
5093 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
5094 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
5095 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305096 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305097 return 1;
5098 }
5099
5100 spin_lock_irqsave(&host->lock, flags);
5101 if (enable) {
5102 /* configure DAT1 gpio if applicable */
5103 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305104 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305105 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5106 if (!ret)
5107 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
5108 goto out;
5109 } else {
5110 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5111 mmc_hostname(host->mmc), enable);
5112 }
5113 } else {
5114 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
5115 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5116 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305117 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305118 } else {
5119 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
5120 mmc_hostname(host->mmc), enable);
5121
5122 }
5123 }
5124out:
5125 if (ret)
5126 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
5127 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
5128 ret, msm_host->pdata->sdiowakeup_irq);
5129 spin_unlock_irqrestore(&host->lock, flags);
5130 return ret;
5131}
5132
5133
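/*
 * Runtime suspend: disable the host and power interrupts (the host IRQ
 * is left enabled for SDIO cards), drop the bus bandwidth vote if the
 * clocks are already off, and suspend the ICE crypto engine when in use.
 */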
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005134static int sdhci_msm_runtime_suspend(struct device *dev)
5135{
5136 struct sdhci_host *host = dev_get_drvdata(dev);
5137 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5138 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005139 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305140 int ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005141
Ritesh Harjani42876f42015-11-17 17:46:51 +05305142 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5143 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05305144
Ritesh Harjani42876f42015-11-17 17:46:51 +05305145 sdhci_cfg_irq(host, false, true);
5146
5147defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005148 disable_irq(msm_host->pwr_irq);
5149
5150 /*
5151 * Remove the vote immediately only if clocks are off in which
5152 * case we might have queued work to remove vote but it may not
5153 * be completed before runtime suspend or system suspend.
5154 */
5155 if (!atomic_read(&msm_host->clks_on)) {
5156 if (msm_host->msm_bus_vote.client_handle)
5157 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5158 }
5159
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305160 if (host->is_crypto_en) {
5161 ret = sdhci_msm_ice_suspend(host);
5162 if (ret < 0)
5163 pr_err("%s: failed to suspend crypto engine %d\n",
5164 mmc_hostname(host->mmc), ret);
5165 }
Konstantin Dorfman98edaa12015-06-11 10:05:18 +03005166 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
5167 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005168 return 0;
5169}
5170
5171static int sdhci_msm_runtime_resume(struct device *dev)
5172{
5173 struct sdhci_host *host = dev_get_drvdata(dev);
5174 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5175 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005176 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305177 int ret;
5178
5179 if (host->is_crypto_en) {
5180 ret = sdhci_msm_enable_controller_clock(host);
5181 if (ret) {
5182 pr_err("%s: Failed to enable reqd clocks\n",
5183 mmc_hostname(host->mmc));
5184 goto skip_ice_resume;
5185 }
5186 ret = sdhci_msm_ice_resume(host);
5187 if (ret)
5188 pr_err("%s: failed to resume crypto engine %d\n",
5189 mmc_hostname(host->mmc), ret);
5190 }
5191skip_ice_resume:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005192
Ritesh Harjani42876f42015-11-17 17:46:51 +05305193 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5194 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05305195
Ritesh Harjani42876f42015-11-17 17:46:51 +05305196 sdhci_cfg_irq(host, true, true);
5197
5198defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005199 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005200
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005201 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
5202 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005203 return 0;
5204}
5205
5206static int sdhci_msm_suspend(struct device *dev)
5207{
5208 struct sdhci_host *host = dev_get_drvdata(dev);
5209 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5210 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005211 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305212 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005213 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005214
5215 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
5216 (msm_host->mmc->slot.cd_irq >= 0))
5217 disable_irq(msm_host->mmc->slot.cd_irq);
5218
5219 if (pm_runtime_suspended(dev)) {
5220 pr_debug("%s: %s: already runtime suspended\n",
5221 mmc_hostname(host->mmc), __func__);
5222 goto out;
5223 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005224 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005225out:
Sayali Lokhandeb30295162016-11-18 16:05:50 +05305226 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05305227 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
5228 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
5229 if (sdio_cfg)
5230 sdhci_cfg_irq(host, false, true);
5231 }
5232
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005233 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
5234 ktime_to_us(ktime_sub(ktime_get(), start)));
5235 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005236}
5237
5238static int sdhci_msm_resume(struct device *dev)
5239{
5240 struct sdhci_host *host = dev_get_drvdata(dev);
5241 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5242 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5243 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305244 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005245 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005246
5247 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
5248 (msm_host->mmc->slot.cd_irq >= 0))
5249 enable_irq(msm_host->mmc->slot.cd_irq);
5250
5251 if (pm_runtime_suspended(dev)) {
5252 pr_debug("%s: %s: runtime suspended, defer system resume\n",
5253 mmc_hostname(host->mmc), __func__);
5254 goto out;
5255 }
5256
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005257 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005258out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05305259 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
5260 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
5261 if (sdio_cfg)
5262 sdhci_cfg_irq(host, true, true);
5263 }
5264
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005265 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
5266 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005267 return ret;
5268}
5269
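/*
 * Late suspend check: fail with -EAGAIN if the clocks are still on
 * (e.g. ksdioirqd is active) or with -EBUSY if SDIO wakeup processing
 * is still pending, so that the suspend is retried.
 */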
Ritesh Harjani42876f42015-11-17 17:46:51 +05305270static int sdhci_msm_suspend_noirq(struct device *dev)
5271{
5272 struct sdhci_host *host = dev_get_drvdata(dev);
5273 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5274 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5275 int ret = 0;
5276
5277 /*
5278	 * ksdioirqd may still be running, hence retry the
5279	 * suspend if the clocks are still ON
5280 */
5281 if (atomic_read(&msm_host->clks_on)) {
5282 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
5283 mmc_hostname(host->mmc), __func__);
5284 ret = -EAGAIN;
5285 }
5286
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305287 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5288 if (msm_host->sdio_pending_processing)
5289 ret = -EBUSY;
5290
Ritesh Harjani42876f42015-11-17 17:46:51 +05305291 return ret;
5292}
5293
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005294static const struct dev_pm_ops sdhci_msm_pmops = {
Vijay Viswanathd8936f82017-07-20 15:50:19 +05305295 SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005296 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
5297 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05305298 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005299};
5300
5301#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
5302
5303#else
5304#define SDHCI_MSM_PMOPS NULL
5305#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05305306static const struct of_device_id sdhci_msm_dt_match[] = {
5307 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305308 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07005309 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05305310};
5311MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
5312
5313static struct platform_driver sdhci_msm_driver = {
5314 .probe = sdhci_msm_probe,
5315 .remove = sdhci_msm_remove,
5316 .driver = {
5317 .name = "sdhci_msm",
5318 .owner = THIS_MODULE,
Lingutla Chandrasekhare73832d2016-09-07 15:59:56 +05305319 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
Asutosh Das0ef24812012-12-18 16:14:02 +05305320 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005321 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05305322 },
5323};
5324
5325module_platform_driver(sdhci_msm_driver);
5326
5327MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
5328MODULE_LICENSE("GPL v2");