blob: 90bc9b235240c8ae85a033948b5e4137d600f73a [file] [log] [blame]
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +053024#include <linux/of_device.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053025#include <linux/of_gpio.h>
26#include <linux/regulator/consumer.h>
27#include <linux/types.h>
28#include <linux/input.h>
29#include <linux/platform_device.h>
30#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070031#include <linux/io.h>
32#include <linux/delay.h>
33#include <linux/scatterlist.h>
34#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053035#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053036#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053037#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053038#include <linux/pinctrl/consumer.h>
39#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053040#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020041#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020042#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053043
Sahitya Tummala56874732015-05-21 08:24:03 +053044#include "sdhci-msm.h"
Sahitya Tummala9325fb02015-05-08 11:53:29 +053045#include "sdhci-msm-ice.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070046#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053047
Asutosh Das36c2e922015-12-01 12:19:58 +053048#define QOS_REMOVE_DELAY_MS 10
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080049#define CORE_POWER 0x0
50#define CORE_SW_RST (1 << 7)
51
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070052#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080053
54#define CORE_VERSION_STEP_MASK 0x0000FFFF
55#define CORE_VERSION_MINOR_MASK 0x0FFF0000
56#define CORE_VERSION_MINOR_SHIFT 16
57#define CORE_VERSION_MAJOR_MASK 0xF0000000
58#define CORE_VERSION_MAJOR_SHIFT 28
59#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030060#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080061
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080062#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053063
64#define CORE_VERSION_MAJOR_MASK 0xF0000000
65#define CORE_VERSION_MAJOR_SHIFT 28
66
Asutosh Das0ef24812012-12-18 16:14:02 +053067#define CORE_HC_MODE 0x78
68#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070069#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053070
Asutosh Das0ef24812012-12-18 16:14:02 +053071#define CORE_PWRCTL_BUS_OFF 0x01
72#define CORE_PWRCTL_BUS_ON (1 << 1)
73#define CORE_PWRCTL_IO_LOW (1 << 2)
74#define CORE_PWRCTL_IO_HIGH (1 << 3)
75
76#define CORE_PWRCTL_BUS_SUCCESS 0x01
77#define CORE_PWRCTL_BUS_FAIL (1 << 1)
78#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
79#define CORE_PWRCTL_IO_FAIL (1 << 3)
80
81#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070082#define MAX_PHASES 16
83
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070084#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070085#define CORE_DLL_EN (1 << 16)
86#define CORE_CDR_EN (1 << 17)
87#define CORE_CK_OUT_EN (1 << 18)
88#define CORE_CDR_EXT_EN (1 << 19)
89#define CORE_DLL_PDN (1 << 29)
90#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070091
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070092#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -070093#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070094
Krishna Konda46fd1432014-10-30 21:13:27 -070095#define CORE_CLK_PWRSAVE (1 << 1)
96#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
97#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
98#define CORE_HC_MCLK_SEL_MASK (3 << 8)
99#define CORE_HC_AUTO_CMD21_EN (1 << 6)
100#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700101#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700102#define CORE_HC_SELECT_IN_EN (1 << 18)
103#define CORE_HC_SELECT_IN_HS400 (6 << 19)
104#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700105#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700106
Pavan Anamula691dd592015-08-25 16:11:20 +0530107#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
108#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530109#define CORE_ONE_MID_EN (1 << 25)
110
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530111#define CORE_8_BIT_SUPPORT (1 << 18)
112#define CORE_3_3V_SUPPORT (1 << 24)
113#define CORE_3_0V_SUPPORT (1 << 25)
114#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300115#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700116
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700117#define CORE_CSR_CDC_CTLR_CFG0 0x130
118#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
119#define CORE_HW_AUTOCAL_ENA (1 << 17)
120
121#define CORE_CSR_CDC_CTLR_CFG1 0x134
122#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
123#define CORE_TIMER_ENA (1 << 16)
124
125#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
126#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
127#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
128#define CORE_CDC_OFFSET_CFG 0x14C
129#define CORE_CSR_CDC_DELAY_CFG 0x150
130#define CORE_CDC_SLAVE_DDA_CFG 0x160
131#define CORE_CSR_CDC_STATUS0 0x164
132#define CORE_CALIBRATION_DONE (1 << 0)
133
134#define CORE_CDC_ERROR_CODE_MASK 0x7000000
135
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300136#define CQ_CMD_DBG_RAM 0x110
137#define CQ_CMD_DBG_RAM_WA 0x150
138#define CQ_CMD_DBG_RAM_OL 0x154
139
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700140#define CORE_CSR_CDC_GEN_CFG 0x178
141#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
142#define CORE_CDC_SWITCH_RC_EN (1 << 1)
143
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700144#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530145#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700146#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530147
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700148#define CORE_PWRSAVE_DLL (1 << 3)
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +0530149#define CORE_FIFO_ALT_EN (1 << 10)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530150#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700151
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700152#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800153#define CORE_FLL_CYCLE_CNT (1 << 18)
154#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700155
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530156#define DDR_CONFIG_POR_VAL 0x80040853
157#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
158#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700159#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700160
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700161/* 512 descriptors */
162#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530163#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530164
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700165#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800166#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700167
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700168#define INVALID_TUNING_PHASE -1
Ritesh Harjani42876f42015-11-17 17:46:51 +0530169#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700170
Krishna Konda96e6b112013-10-28 15:25:03 -0700171#define NUM_TUNING_PHASES 16
Talel Shenhar6f0f3412015-11-08 14:21:31 +0200172#define MAX_DRV_TYPES_SUPPORTED_HS200 4
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200173#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700174
/*
 * Per-variant offsets of the MSM SDCC "core" registers.
 *
 * Two instances are defined below: one for controllers where the legacy
 * MCI register region was removed (offsets applied to host->ioaddr) and
 * one for older controllers that still have it (offsets applied to the
 * separate core_mem region).  The sdhci_msm_read*/write*_relaxed()
 * helpers pick the base address accordingly.
 */
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	/* Bit position / mask values for the testbus mux, not offsets */
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};
203
/*
 * Offset map for controllers where the MCI register region was removed:
 * all core registers are reached through the HC space (host->ioaddr).
 */
struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};
232
/*
 * Offset map for older controllers that still have the MCI register
 * region: core registers are reached through core_mem.
 */
struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};
261
262u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
263{
264 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
265 struct sdhci_msm_host *msm_host = pltfm_host->priv;
266 void __iomem *base_addr;
267
268 if (msm_host->mci_removed)
269 base_addr = host->ioaddr;
270 else
271 base_addr = msm_host->core_mem;
272
273 return readb_relaxed(base_addr + offset);
274}
275
276u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
277{
278 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
279 struct sdhci_msm_host *msm_host = pltfm_host->priv;
280 void __iomem *base_addr;
281
282 if (msm_host->mci_removed)
283 base_addr = host->ioaddr;
284 else
285 base_addr = msm_host->core_mem;
286
287 return readl_relaxed(base_addr + offset);
288}
289
290void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
291{
292 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
293 struct sdhci_msm_host *msm_host = pltfm_host->priv;
294 void __iomem *base_addr;
295
296 if (msm_host->mci_removed)
297 base_addr = host->ioaddr;
298 else
299 base_addr = msm_host->core_mem;
300
301 writeb_relaxed(val, base_addr + offset);
302}
303
304void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
305{
306 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
307 struct sdhci_msm_host *msm_host = pltfm_host->priv;
308 void __iomem *base_addr;
309
310 if (msm_host->mci_removed)
311 base_addr = host->ioaddr;
312 else
313 base_addr = msm_host->core_mem;
314
315 writel_relaxed(val, base_addr + offset);
316}
317
Ritesh Harjani82124772014-11-04 15:34:00 +0530318/* Timeout value to avoid infinite waiting for pwr_irq */
319#define MSM_PWR_IRQ_TIMEOUT_MS 5000
320
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700321static const u32 tuning_block_64[] = {
322 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
323 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
324 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
325 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
326};
327
328static const u32 tuning_block_128[] = {
329 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
330 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
331 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
332 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
333 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
334 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
335 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
336 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
337};
Asutosh Das0ef24812012-12-18 16:14:02 +0530338
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -0700339/* global to hold each slot instance for debug */
340static struct sdhci_msm_host *sdhci_slot[2];
341
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -0700342static int disable_slots;
343/* root can write, others read */
344module_param(disable_slots, int, S_IRUGO|S_IWUSR);
345
Ritesh Harjani7270ca22017-01-03 15:46:06 +0530346static bool nocmdq;
347module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
348
Asutosh Das0ef24812012-12-18 16:14:02 +0530349enum vdd_io_level {
350 /* set vdd_io_data->low_vol_level */
351 VDD_IO_LOW,
352 /* set vdd_io_data->high_vol_level */
353 VDD_IO_HIGH,
354 /*
355 * set whatever there in voltage_level (third argument) of
356 * sdhci_msm_set_vdd_io_vol() function.
357 */
358 VDD_IO_SET_LEVEL,
359};
360
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700361/* MSM platform specific tuning */
362static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
363 u8 poll)
364{
365 int rc = 0;
366 u32 wait_cnt = 50;
367 u8 ck_out_en = 0;
368 struct mmc_host *mmc = host->mmc;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530369 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
370 struct sdhci_msm_host *msm_host = pltfm_host->priv;
371 const struct sdhci_msm_offset *msm_host_offset =
372 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700373
374 /* poll for CK_OUT_EN bit. max. poll time = 50us */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530375 ck_out_en = !!(readl_relaxed(host->ioaddr +
376 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700377
378 while (ck_out_en != poll) {
379 if (--wait_cnt == 0) {
380 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
381 mmc_hostname(mmc), __func__, poll);
382 rc = -ETIMEDOUT;
383 goto out;
384 }
385 udelay(1);
386
387 ck_out_en = !!(readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530388 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700389 }
390out:
391 return rc;
392}
393
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530394/*
395 * Enable CDR to track changes of DAT lines and adjust sampling
396 * point according to voltage/temperature variations
397 */
398static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
399{
400 int rc = 0;
401 u32 config;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530402 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
403 struct sdhci_msm_host *msm_host = pltfm_host->priv;
404 const struct sdhci_msm_offset *msm_host_offset =
405 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530406
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530407 config = readl_relaxed(host->ioaddr +
408 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530409 config |= CORE_CDR_EN;
410 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530411 writel_relaxed(config, host->ioaddr +
412 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530413
414 rc = msm_dll_poll_ck_out_en(host, 0);
415 if (rc)
416 goto err;
417
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530418 writel_relaxed((readl_relaxed(host->ioaddr +
419 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
420 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530421
422 rc = msm_dll_poll_ck_out_en(host, 1);
423 if (rc)
424 goto err;
425 goto out;
426err:
427 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
428out:
429 return rc;
430}
431
432static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
433 *attr, const char *buf, size_t count)
434{
435 struct sdhci_host *host = dev_get_drvdata(dev);
436 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
437 struct sdhci_msm_host *msm_host = pltfm_host->priv;
438 u32 tmp;
439 unsigned long flags;
440
441 if (!kstrtou32(buf, 0, &tmp)) {
442 spin_lock_irqsave(&host->lock, flags);
443 msm_host->en_auto_cmd21 = !!tmp;
444 spin_unlock_irqrestore(&host->lock, flags);
445 }
446 return count;
447}
448
449static ssize_t show_auto_cmd21(struct device *dev,
450 struct device_attribute *attr, char *buf)
451{
452 struct sdhci_host *host = dev_get_drvdata(dev);
453 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
454 struct sdhci_msm_host *msm_host = pltfm_host->priv;
455
456 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
457}
458
/* MSM auto-tuning handler */
/*
 * Enable or disable HW auto-CMD21 for HS200 tuning.
 *
 * Only acts when the feature was enabled via the auto_cmd21 sysfs knob
 * and @type is MMC_SEND_TUNING_BLOCK_HS200; otherwise returns 0 without
 * touching any register.  On enable, CDR is configured first so the
 * sampling point tracks the DAT lines, then AUTO_CMD21_EN is set in
 * CORE_VENDOR_SPEC via read-modify-write; on disable the bit is cleared.
 */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					bool enable,
					u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
			msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		/*
		 * NOTE(review): rc from the CDR setup is propagated, but the
		 * VENDOR_SPEC write below is performed even if it failed -
		 * confirm this ordering is intentional.
		 */
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}
491
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700492static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
493{
494 int rc = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530495 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
496 struct sdhci_msm_host *msm_host = pltfm_host->priv;
497 const struct sdhci_msm_offset *msm_host_offset =
498 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700499 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
500 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
501 0x8};
502 unsigned long flags;
503 u32 config;
504 struct mmc_host *mmc = host->mmc;
505
506 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
507 spin_lock_irqsave(&host->lock, flags);
508
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530509 config = readl_relaxed(host->ioaddr +
510 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700511 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
512 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530513 writel_relaxed(config, host->ioaddr +
514 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700515
516 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
517 rc = msm_dll_poll_ck_out_en(host, 0);
518 if (rc)
519 goto err_out;
520
521 /*
522 * Write the selected DLL clock output phase (0 ... 15)
523 * to CDR_SELEXT bit field of DLL_CONFIG register.
524 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530525 writel_relaxed(((readl_relaxed(host->ioaddr +
526 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700527 & ~(0xF << 20))
528 | (grey_coded_phase_table[phase] << 20)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530529 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700530
531 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530532 writel_relaxed((readl_relaxed(host->ioaddr +
533 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
534 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700535
536 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
537 rc = msm_dll_poll_ck_out_en(host, 1);
538 if (rc)
539 goto err_out;
540
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530541 config = readl_relaxed(host->ioaddr +
542 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700543 config |= CORE_CDR_EN;
544 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530545 writel_relaxed(config, host->ioaddr +
546 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700547 goto out;
548
549err_out:
550 pr_err("%s: %s: Failed to set DLL phase: %d\n",
551 mmc_hostname(mmc), __func__, phase);
552out:
553 spin_unlock_irqrestore(&host->lock, flags);
554 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
555 return rc;
556}
557
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 *
 * @phase_table:  sorted list of phases (0..15) that passed tuning
 * @total_phases: number of entries in @phase_table (1..MAX_PHASES)
 *
 * Returns the chosen phase (>= 0) or -EINVAL on bad input or an
 * internally inconsistent window layout.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[row] holds one window of consecutive passing phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split phase_table into rows of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the widest window */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Choose the phase 3/4 of the way into the widest window */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
671
672static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
673{
674 u32 mclk_freq = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530675 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
676 struct sdhci_msm_host *msm_host = pltfm_host->priv;
677 const struct sdhci_msm_offset *msm_host_offset =
678 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700679
680 /* Program the MCLK value to MCLK_FREQ bit field */
681 if (host->clock <= 112000000)
682 mclk_freq = 0;
683 else if (host->clock <= 125000000)
684 mclk_freq = 1;
685 else if (host->clock <= 137000000)
686 mclk_freq = 2;
687 else if (host->clock <= 150000000)
688 mclk_freq = 3;
689 else if (host->clock <= 162000000)
690 mclk_freq = 4;
691 else if (host->clock <= 175000000)
692 mclk_freq = 5;
693 else if (host->clock <= 187000000)
694 mclk_freq = 6;
Subhash Jadavanib3235262017-07-19 16:56:04 -0700695 else if (host->clock <= 208000000)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700696 mclk_freq = 7;
697
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530698 writel_relaxed(((readl_relaxed(host->ioaddr +
699 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700700 & ~(7 << 24)) | (mclk_freq << 24)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530701 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700702}
703
704/* Initialize the DLL (Programmable Delay Line ) */
705static int msm_init_cm_dll(struct sdhci_host *host)
706{
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800707 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
708 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530709 const struct sdhci_msm_offset *msm_host_offset =
710 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700711 struct mmc_host *mmc = host->mmc;
712 int rc = 0;
713 unsigned long flags;
714 u32 wait_cnt;
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530715 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700716
717 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
718 spin_lock_irqsave(&host->lock, flags);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530719 prev_pwrsave = !!(readl_relaxed(host->ioaddr +
720 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530721 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700722 /*
723 * Make sure that clock is always enabled when DLL
724 * tuning is in progress. Keeping PWRSAVE ON may
725 * turn off the clock. So let's disable the PWRSAVE
726 * here and re-enable it once tuning is completed.
727 */
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530728 if (prev_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530729 writel_relaxed((readl_relaxed(host->ioaddr +
730 msm_host_offset->CORE_VENDOR_SPEC)
731 & ~CORE_CLK_PWRSAVE), host->ioaddr +
732 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530733 curr_pwrsave = false;
734 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700735
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800736 if (msm_host->use_updated_dll_reset) {
737 /* Disable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530738 writel_relaxed((readl_relaxed(host->ioaddr +
739 msm_host_offset->CORE_DLL_CONFIG)
740 & ~CORE_CK_OUT_EN), host->ioaddr +
741 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800742
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530743 writel_relaxed((readl_relaxed(host->ioaddr +
744 msm_host_offset->CORE_DLL_CONFIG_2)
745 | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
746 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800747 }
748
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700749 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530750 writel_relaxed((readl_relaxed(host->ioaddr +
751 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
752 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700753
754 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530755 writel_relaxed((readl_relaxed(host->ioaddr +
756 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
757 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700758 msm_cm_dll_set_freq(host);
759
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800760 if (msm_host->use_updated_dll_reset) {
761 u32 mclk_freq = 0;
762
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530763 if ((readl_relaxed(host->ioaddr +
764 msm_host_offset->CORE_DLL_CONFIG_2)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800765 & CORE_FLL_CYCLE_CNT))
766 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
767 else
768 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
769
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530770 writel_relaxed(((readl_relaxed(host->ioaddr +
771 msm_host_offset->CORE_DLL_CONFIG_2)
772 & ~(0xFF << 10)) | (mclk_freq << 10)),
773 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800774 /* wait for 5us before enabling DLL clock */
775 udelay(5);
776 }
777
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700778 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530779 writel_relaxed((readl_relaxed(host->ioaddr +
780 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
781 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700782
783 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530784 writel_relaxed((readl_relaxed(host->ioaddr +
785 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
786 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700787
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800788 if (msm_host->use_updated_dll_reset) {
789 msm_cm_dll_set_freq(host);
790 /* Enable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530791 writel_relaxed((readl_relaxed(host->ioaddr +
792 msm_host_offset->CORE_DLL_CONFIG_2)
793 & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
794 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800795 }
796
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700797 /* Set DLL_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530798 writel_relaxed((readl_relaxed(host->ioaddr +
799 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
800 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700801
802 /* Set CK_OUT_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530803 writel_relaxed((readl_relaxed(host->ioaddr +
804 msm_host_offset->CORE_DLL_CONFIG)
805 | CORE_CK_OUT_EN), host->ioaddr +
806 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700807
808 wait_cnt = 50;
809 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530810 while (!(readl_relaxed(host->ioaddr +
811 msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700812 /* max. wait for 50us sec for LOCK bit to be set */
813 if (--wait_cnt == 0) {
814 pr_err("%s: %s: DLL failed to LOCK\n",
815 mmc_hostname(mmc), __func__);
816 rc = -ETIMEDOUT;
817 goto out;
818 }
819 /* wait for 1us before polling again */
820 udelay(1);
821 }
822
823out:
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530824 /* Restore the correct PWRSAVE state */
825 if (prev_pwrsave ^ curr_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530826 u32 reg = readl_relaxed(host->ioaddr +
827 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530828
829 if (prev_pwrsave)
830 reg |= CORE_CLK_PWRSAVE;
831 else
832 reg &= ~CORE_CLK_PWRSAVE;
833
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530834 writel_relaxed(reg, host->ioaddr +
835 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530836 }
837
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700838 spin_unlock_irqrestore(&host->lock, flags);
839 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
840 return rc;
841}
842
/*
 * sdhci_msm_cdclp533_calibration - calibrate the CDCLP533 delay circuit
 * @host: SDHCI host instance
 *
 * Runs the hardware calibration sequence for the CDC (Calibrating Delay
 * Circuit) used in HS400 mode: programs the CDC configuration registers
 * with the recommended values, triggers a software full calibration pulse,
 * enables hardware auto-calibration with its timer, then polls for
 * completion and finally (re)starts CDC traffic.
 *
 * The register writes below follow the hardware programming guide; their
 * order is significant and must not be rearranged.
 *
 * Return: 0 on success, -ETIMEDOUT if calibration does not complete within
 * 50us, -EINVAL if the CDC reports a non-zero error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Ensure all the above writes are posted before polling for status */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
955
/*
 * sdhci_msm_cm_dll_sdc4_calibration - run DDR (CM_DLL_SDC4) DLL calibration
 * @host: SDHCI host instance
 *
 * Restores the POR DDR_CONFIG value, optionally enables RCLK on the CMD
 * input for enhanced-strobe capable host/card pairs, kicks off DDR
 * calibration and waits for the DDR DLL to lock.  On controllers that do
 * not use the 14lpp DLL it also enables the DLL power-save mode.
 *
 * Return: 0 on success, -ETIMEDOUT if the DDR DLL fails to lock within
 * the poll window.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		/* Preserve POR value but force the PRG_RCLK_DLY field */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	/* Route RCLK through the CMD input when enhanced strobe is in use */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		 msm_host_offset->CORE_DLL_STATUS,
		 dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	/* Make sure all register writes are posted before returning */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1024
Ritesh Harjaniea709662015-05-27 15:40:24 +05301025static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
1026{
1027 int ret = 0;
1028 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1029 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1030 struct mmc_host *mmc = host->mmc;
1031
1032 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
1033
Ritesh Harjani70e2a712015-08-25 11:34:16 +05301034 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
1035 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +05301036 mmc_hostname(mmc));
1037 return -EINVAL;
1038 }
1039
1040 if (msm_host->calibration_done ||
1041 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
1042 return 0;
1043 }
1044
1045 /*
1046 * Reset the tuning block.
1047 */
1048 ret = msm_init_cm_dll(host);
1049 if (ret)
1050 goto out;
1051
1052 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
1053out:
1054 if (!ret)
1055 msm_host->calibration_done = true;
1056 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
1057 __func__, ret);
1058 return ret;
1059}
1060
/*
 * sdhci_msm_hs400_dll_calibration - DLL calibration entry point for HS400
 * @host: SDHCI host instance
 *
 * Resets the tuning block, restores the tuning phase saved from the last
 * successful HS200 tuning, enables command/data tracking, and then runs
 * either the CDCLP533 or the CM_DLL_SDC4 calibration depending on the
 * controller variant.
 *
 * Return: 0 on success, otherwise the error from the DLL reset, phase
 * configuration, or the selected calibration routine.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1101
Krishna Konda96e6b112013-10-28 15:25:03 -07001102static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
1103 u8 drv_type)
1104{
1105 struct mmc_command cmd = {0};
1106 struct mmc_request mrq = {NULL};
1107 struct mmc_host *mmc = host->mmc;
1108 u8 val = ((drv_type << 4) | 2);
1109
1110 cmd.opcode = MMC_SWITCH;
1111 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1112 (EXT_CSD_HS_TIMING << 16) |
1113 (val << 8) |
1114 EXT_CSD_CMD_SET_NORMAL;
1115 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
1116 /* 1 sec */
1117 cmd.busy_timeout = 1000 * 1000;
1118
1119 memset(cmd.resp, 0, sizeof(cmd.resp));
1120 cmd.retries = 3;
1121
1122 mrq.cmd = &cmd;
1123 cmd.data = NULL;
1124
1125 mmc_wait_for_req(mmc, &mrq);
1126 pr_debug("%s: %s: set card drive type to %d\n",
1127 mmc_hostname(mmc), __func__,
1128 drv_type);
1129}
1130
/*
 * sdhci_msm_execute_tuning - run the DLL phase tuning sequence
 * @host: SDHCI host instance
 * @opcode: tuning command opcode (e.g. MMC_SEND_TUNING_BLOCK_HS200)
 *
 * Steps through all 16 DLL phases, sending the tuning command at each one
 * and comparing the data read back against the expected tuning block
 * pattern, then programs the "most appropriate" of the passing phases into
 * the delay line.  The whole sequence is retried up to 3 times if no phase
 * passes.  For eMMC, if *all* phases pass the card's drive strength is
 * varied and tuning repeated until at least one phase fails, to get a
 * meaningful pass window.
 *
 * Tuning only applies to SDR104/HS200/HS400 above 100MHz; in HS400 with a
 * completed HS200 tuning, only the DLL calibration is (re)run instead.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO if no tuning
 * point is found, or an error from the DLL helpers.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;
	u8 last_good_phase = 0;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the larger 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	/* Scratch buffer for the tuning block read back from the card */
	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			/*
			 * Set the dll to last known good phase while sending
			 * status command to ensure that status command won't
			 * fail due to bad phase.
			 */
			if (tuned_phase_cnt)
				last_good_phase =
					tuned_phases[tuned_phase_cnt-1];
			else if (msm_host->saved_tuning_phase !=
					INVALID_TUNING_PHASE)
				last_good_phase = msm_host->saved_tuning_phase;

			rc = msm_config_cm_dll_phase(host, last_good_phase);
			if (rc)
				goto kfree;

			/* Wait (up to 5 tries) for the card back in TRAN state */
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		/* No phase passed: retry the whole sequence up to 3 times */
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1362
Asutosh Das0ef24812012-12-18 16:14:02 +05301363static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1364{
1365 struct sdhci_msm_gpio_data *curr;
1366 int i, ret = 0;
1367
1368 curr = pdata->pin_data->gpio_data;
1369 for (i = 0; i < curr->size; i++) {
1370 if (!gpio_is_valid(curr->gpio[i].no)) {
1371 ret = -EINVAL;
1372 pr_err("%s: Invalid gpio = %d\n", __func__,
1373 curr->gpio[i].no);
1374 goto free_gpios;
1375 }
1376 if (enable) {
1377 ret = gpio_request(curr->gpio[i].no,
1378 curr->gpio[i].name);
1379 if (ret) {
1380 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1381 __func__, curr->gpio[i].no,
1382 curr->gpio[i].name, ret);
1383 goto free_gpios;
1384 }
1385 curr->gpio[i].is_enabled = true;
1386 } else {
1387 gpio_free(curr->gpio[i].no);
1388 curr->gpio[i].is_enabled = false;
1389 }
1390 }
1391 return ret;
1392
1393free_gpios:
1394 for (i--; i >= 0; i--) {
1395 gpio_free(curr->gpio[i].no);
1396 curr->gpio[i].is_enabled = false;
1397 }
1398 return ret;
1399}
1400
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301401static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1402 bool enable)
1403{
1404 int ret = 0;
1405
1406 if (enable)
1407 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1408 pdata->pctrl_data->pins_active);
1409 else
1410 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1411 pdata->pctrl_data->pins_sleep);
1412
1413 if (ret < 0)
1414 pr_err("%s state for pinctrl failed with %d\n",
1415 enable ? "Enabling" : "Disabling", ret);
1416
1417 return ret;
1418}
1419
Asutosh Das0ef24812012-12-18 16:14:02 +05301420static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1421{
1422 int ret = 0;
1423
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301424 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301425 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301426 } else if (pdata->pctrl_data) {
1427 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1428 goto out;
1429 } else if (!pdata->pin_data) {
1430 return 0;
1431 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301432
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301433 if (pdata->pin_data->is_gpio)
1434 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301435out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301436 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301437 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301438
1439 return ret;
1440}
1441
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301442static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1443 u32 **out, int *len, u32 size)
1444{
1445 int ret = 0;
1446 struct device_node *np = dev->of_node;
1447 size_t sz;
1448 u32 *arr = NULL;
1449
1450 if (!of_get_property(np, prop_name, len)) {
1451 ret = -EINVAL;
1452 goto out;
1453 }
1454 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001455 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301456 dev_err(dev, "%s invalid size\n", prop_name);
1457 ret = -EINVAL;
1458 goto out;
1459 }
1460
1461 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1462 if (!arr) {
1463 dev_err(dev, "%s failed allocating memory\n", prop_name);
1464 ret = -ENOMEM;
1465 goto out;
1466 }
1467
1468 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1469 if (ret < 0) {
1470 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1471 goto out;
1472 }
1473 *out = arr;
1474out:
1475 if (ret)
1476 *len = 0;
1477 return ret;
1478}
1479
Asutosh Das0ef24812012-12-18 16:14:02 +05301480#define MAX_PROP_SIZE 32
1481static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1482 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1483{
1484 int len, ret = 0;
1485 const __be32 *prop;
1486 char prop_name[MAX_PROP_SIZE];
1487 struct sdhci_msm_reg_data *vreg;
1488 struct device_node *np = dev->of_node;
1489
1490 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1491 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301492 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301493 return ret;
1494 }
1495
1496 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1497 if (!vreg) {
1498 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1499 ret = -ENOMEM;
1500 return ret;
1501 }
1502
1503 vreg->name = vreg_name;
1504
1505 snprintf(prop_name, MAX_PROP_SIZE,
1506 "qcom,%s-always-on", vreg_name);
1507 if (of_get_property(np, prop_name, NULL))
1508 vreg->is_always_on = true;
1509
1510 snprintf(prop_name, MAX_PROP_SIZE,
1511 "qcom,%s-lpm-sup", vreg_name);
1512 if (of_get_property(np, prop_name, NULL))
1513 vreg->lpm_sup = true;
1514
1515 snprintf(prop_name, MAX_PROP_SIZE,
1516 "qcom,%s-voltage-level", vreg_name);
1517 prop = of_get_property(np, prop_name, &len);
1518 if (!prop || (len != (2 * sizeof(__be32)))) {
1519 dev_warn(dev, "%s %s property\n",
1520 prop ? "invalid format" : "no", prop_name);
1521 } else {
1522 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1523 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1524 }
1525
1526 snprintf(prop_name, MAX_PROP_SIZE,
1527 "qcom,%s-current-level", vreg_name);
1528 prop = of_get_property(np, prop_name, &len);
1529 if (!prop || (len != (2 * sizeof(__be32)))) {
1530 dev_warn(dev, "%s %s property\n",
1531 prop ? "invalid format" : "no", prop_name);
1532 } else {
1533 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1534 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1535 }
1536
1537 *vreg_data = vreg;
1538 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1539 vreg->name, vreg->is_always_on ? "always_on," : "",
1540 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1541 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1542
1543 return ret;
1544}
1545
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301546static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1547 struct sdhci_msm_pltfm_data *pdata)
1548{
1549 struct sdhci_pinctrl_data *pctrl_data;
1550 struct pinctrl *pctrl;
1551 int ret = 0;
1552
1553 /* Try to obtain pinctrl handle */
1554 pctrl = devm_pinctrl_get(dev);
1555 if (IS_ERR(pctrl)) {
1556 ret = PTR_ERR(pctrl);
1557 goto out;
1558 }
1559 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1560 if (!pctrl_data) {
1561 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1562 ret = -ENOMEM;
1563 goto out;
1564 }
1565 pctrl_data->pctrl = pctrl;
1566 /* Look-up and keep the states handy to be used later */
1567 pctrl_data->pins_active = pinctrl_lookup_state(
1568 pctrl_data->pctrl, "active");
1569 if (IS_ERR(pctrl_data->pins_active)) {
1570 ret = PTR_ERR(pctrl_data->pins_active);
1571 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1572 goto out;
1573 }
1574 pctrl_data->pins_sleep = pinctrl_lookup_state(
1575 pctrl_data->pctrl, "sleep");
1576 if (IS_ERR(pctrl_data->pins_sleep)) {
1577 ret = PTR_ERR(pctrl_data->pins_sleep);
1578 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1579 goto out;
1580 }
1581 pdata->pctrl_data = pctrl_data;
1582out:
1583 return ret;
1584}
1585
Asutosh Das0ef24812012-12-18 16:14:02 +05301586#define GPIO_NAME_MAX_LEN 32
1587static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1588 struct sdhci_msm_pltfm_data *pdata)
1589{
1590 int ret = 0, cnt, i;
1591 struct sdhci_msm_pin_data *pin_data;
1592 struct device_node *np = dev->of_node;
1593
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301594 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1595 if (!ret) {
1596 goto out;
1597 } else if (ret == -EPROBE_DEFER) {
1598 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1599 goto out;
1600 } else {
1601 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1602 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301603 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301604 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301605 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1606 if (!pin_data) {
1607 dev_err(dev, "No memory for pin_data\n");
1608 ret = -ENOMEM;
1609 goto out;
1610 }
1611
1612 cnt = of_gpio_count(np);
1613 if (cnt > 0) {
1614 pin_data->gpio_data = devm_kzalloc(dev,
1615 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1616 if (!pin_data->gpio_data) {
1617 dev_err(dev, "No memory for gpio_data\n");
1618 ret = -ENOMEM;
1619 goto out;
1620 }
1621 pin_data->gpio_data->size = cnt;
1622 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1623 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1624
1625 if (!pin_data->gpio_data->gpio) {
1626 dev_err(dev, "No memory for gpio\n");
1627 ret = -ENOMEM;
1628 goto out;
1629 }
1630
1631 for (i = 0; i < cnt; i++) {
1632 const char *name = NULL;
1633 char result[GPIO_NAME_MAX_LEN];
1634 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1635 of_property_read_string_index(np,
1636 "qcom,gpio-names", i, &name);
1637
1638 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1639 dev_name(dev), name ? name : "?");
1640 pin_data->gpio_data->gpio[i].name = result;
1641 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1642 pin_data->gpio_data->gpio[i].name,
1643 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301644 }
1645 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301646 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301647out:
1648 if (ret)
1649 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1650 return ret;
1651}
1652
#ifdef CONFIG_SMP
/*
 * On SMP kernels the PM QoS request can track the CPU(s) that actually
 * service the SDHC interrupt; mark the request type accordingly.
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
/* On UP kernels IRQ affinity is meaningless: leave the default type. */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1661
Gilad Bronerc788a672015-09-08 15:39:11 +03001662static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1663 struct sdhci_msm_pltfm_data *pdata)
1664{
1665 struct device_node *np = dev->of_node;
1666 const char *str;
1667 u32 cpu;
1668 int ret = 0;
1669 int i;
1670
1671 pdata->pm_qos_data.irq_valid = false;
1672 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1673 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1674 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001675 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001676 }
1677
1678 /* must specify cpu for "affine_cores" type */
1679 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1680 pdata->pm_qos_data.irq_cpu = -1;
1681 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1682 if (ret) {
1683 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1684 ret);
1685 goto out;
1686 }
1687 if (cpu < 0 || cpu >= num_possible_cpus()) {
1688 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1689 __func__, cpu, num_possible_cpus());
1690 ret = -EINVAL;
1691 goto out;
1692 }
1693 pdata->pm_qos_data.irq_cpu = cpu;
1694 }
1695
1696 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1697 SDHCI_POWER_POLICY_NUM) {
1698 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1699 __func__, SDHCI_POWER_POLICY_NUM);
1700 ret = -EINVAL;
1701 goto out;
1702 }
1703
1704 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1705 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1706 &pdata->pm_qos_data.irq_latency.latency[i]);
1707
1708 pdata->pm_qos_data.irq_valid = true;
1709out:
1710 return ret;
1711}
1712
1713static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1714 struct sdhci_msm_pltfm_data *pdata)
1715{
1716 struct device_node *np = dev->of_node;
1717 u32 mask;
1718 int nr_groups;
1719 int ret;
1720 int i;
1721
1722 /* Read cpu group mapping */
1723 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1724 if (nr_groups <= 0) {
1725 ret = -EINVAL;
1726 goto out;
1727 }
1728 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1729 pdata->pm_qos_data.cpu_group_map.mask =
1730 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1731 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1732 ret = -ENOMEM;
1733 goto out;
1734 }
1735
1736 for (i = 0; i < nr_groups; i++) {
1737 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1738 i, &mask);
1739
1740 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1741 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1742 cpu_possible_mask)) {
1743 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1744 __func__, mask, i);
1745 ret = -EINVAL;
1746 goto free_res;
1747 }
1748 }
1749 return 0;
1750
1751free_res:
1752 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1753out:
1754 return ret;
1755}
1756
1757static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1758 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1759{
1760 struct device_node *np = dev->of_node;
1761 struct sdhci_msm_pm_qos_latency *values;
1762 int ret;
1763 int i;
1764 int group;
1765 int cfg;
1766
1767 ret = of_property_count_u32_elems(np, name);
1768 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1769 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1770 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1771 ret);
1772 return -EINVAL;
1773 } else if (ret < 0) {
1774 return ret;
1775 }
1776
1777 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1778 GFP_KERNEL);
1779 if (!values)
1780 return -ENOMEM;
1781
1782 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1783 group = i / SDHCI_POWER_POLICY_NUM;
1784 cfg = i % SDHCI_POWER_POLICY_NUM;
1785 of_property_read_u32_index(np, name, i,
1786 &(values[group].latency[cfg]));
1787 }
1788
1789 *latency = values;
1790 return 0;
1791}
1792
1793static void sdhci_msm_pm_qos_parse(struct device *dev,
1794 struct sdhci_msm_pltfm_data *pdata)
1795{
1796 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1797 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1798 __func__);
1799
1800 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1801 pdata->pm_qos_data.cmdq_valid =
1802 !sdhci_msm_pm_qos_parse_latency(dev,
1803 "qcom,pm-qos-cmdq-latency-us",
1804 pdata->pm_qos_data.cpu_group_map.nr_groups,
1805 &pdata->pm_qos_data.cmdq_latency);
1806 pdata->pm_qos_data.legacy_valid =
1807 !sdhci_msm_pm_qos_parse_latency(dev,
1808 "qcom,pm-qos-legacy-latency-us",
1809 pdata->pm_qos_data.cpu_group_map.nr_groups,
1810 &pdata->pm_qos_data.latency);
1811 if (!pdata->pm_qos_data.cmdq_valid &&
1812 !pdata->pm_qos_data.legacy_valid) {
1813 /* clean-up previously allocated arrays */
1814 kfree(pdata->pm_qos_data.latency);
1815 kfree(pdata->pm_qos_data.cmdq_latency);
1816 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1817 __func__);
1818 }
1819 } else {
1820 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1821 __func__);
1822 }
1823}
1824
/* Parse platform data */
/*
 * Build the driver's platform data from the device-tree node:
 * card-detect GPIO polarity, bus width, clock-scaling frequency table,
 * supported controller clock rates, ICE clock rates (when an ICE device
 * is present), vdd/vdd-io regulator descriptions, pin configuration,
 * bus-speed-mode capability flags and assorted boolean properties.
 *
 * Returns the populated pdata, or NULL on any parse/allocation failure
 * (everything allocated here is device-managed, so no cleanup is done).
 */
static
struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
						struct sdhci_msm_host *msm_host)
{
	struct sdhci_msm_pltfm_data *pdata = NULL;
	struct device_node *np = dev->of_node;
	u32 bus_width = 0;
	int len, i;
	int clk_table_len;
	u32 *clk_table = NULL;
	int ice_clk_table_len;
	u32 *ice_clk_table = NULL;
	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
	const char *lower_bus_speed = NULL;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "failed to allocate memory for platform data\n");
		goto out;
	}

	/* cd-gpios flags tell us the card-detect line's active level */
	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;

	of_property_read_u32(np, "qcom,bus-width", &bus_width);
	if (bus_width == 8)
		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
	else if (bus_width == 4)
		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
	else {
		dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
		pdata->mmc_bus_width = 0;
	}

	/* devfreq table is optional; absence only disables clock scaling */
	if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
			&msm_host->mmc->clk_scaling.pltfm_freq_table,
			&msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
		pr_debug("%s: no clock scaling frequencies were supplied\n",
			dev_name(dev));
	else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
		!msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
		dev_err(dev, "bad dts clock scaling frequencies\n");

	/*
	 * Few hosts can support DDR52 mode at the same lower
	 * system voltage corner as high-speed mode. In such cases,
	 * it is always better to put it in DDR mode which will
	 * improve the performance without any power impact.
	 */
	if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
			&lower_bus_speed)) {
		if (!strcmp(lower_bus_speed, "DDR52"))
			msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
				MMC_SCALING_LOWER_DDR52_MODE;
	}

	/* Supported controller clock rates are mandatory */
	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
			&clk_table, &clk_table_len, 0)) {
		dev_err(dev, "failed parsing supported clock rates\n");
		goto out;
	}
	if (!clk_table || !clk_table_len) {
		dev_err(dev, "Invalid clock table\n");
		goto out;
	}
	pdata->sup_clk_table = clk_table;
	pdata->sup_clk_cnt = clk_table_len;

	/* ICE clock rates only apply when the crypto engine is present */
	if (msm_host->ice.pdev) {
		if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
				&ice_clk_table, &ice_clk_table_len, 0)) {
			dev_err(dev, "failed parsing supported ice clock rates\n");
			goto out;
		}
		if (!ice_clk_table || !ice_clk_table_len) {
			dev_err(dev, "Invalid clock table\n");
			goto out;
		}
		if (ice_clk_table_len != 2) {
			dev_err(dev, "Need max and min frequencies in the table\n");
			goto out;
		}
		pdata->sup_ice_clk_table = ice_clk_table;
		pdata->sup_ice_clk_cnt = ice_clk_table_len;
		/* table layout is fixed: [0] = max rate, [1] = min rate */
		pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
		pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
		dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
				pdata->ice_clk_max, pdata->ice_clk_min);
	}

	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
						    sdhci_msm_slot_reg_data),
					GFP_KERNEL);
	if (!pdata->vreg_data) {
		dev_err(dev, "failed to allocate memory for vreg data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
					 "vdd")) {
		dev_err(dev, "failed parsing vdd data\n");
		goto out;
	}
	if (sdhci_msm_dt_parse_vreg_info(dev,
					 &pdata->vreg_data->vdd_io_data,
					 "vdd-io")) {
		dev_err(dev, "failed parsing vdd-io data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
		dev_err(dev, "failed parsing gpio data\n");
		goto out;
	}

	/* Translate each listed bus-speed-mode string into MMC cap bits */
	len = of_property_count_strings(np, "qcom,bus-speed-mode");

	for (i = 0; i < len; i++) {
		const char *name = NULL;

		of_property_read_string_index(np,
			"qcom,bus-speed-mode", i, &name);
		if (!name)
			continue;

		if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
		else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
			pdata->caps |= MMC_CAP_1_8V_DDR
					| MMC_CAP_UHS_DDR50;
		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
			pdata->caps |= MMC_CAP_1_2V_DDR
					| MMC_CAP_UHS_DDR50;
	}

	if (of_get_property(np, "qcom,nonremovable", NULL))
		pdata->nonremovable = true;

	if (of_get_property(np, "qcom,nonhotplug", NULL))
		pdata->nonhotplug = true;

	pdata->largeaddressbus =
		of_property_read_bool(np, "qcom,large-address-bus");

	if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
		msm_host->mmc->wakeup_on_idle = true;

	sdhci_msm_pm_qos_parse(dev, pdata);

	if (of_get_property(np, "qcom,core_3_0v_support", NULL))
		msm_host->core_3_0v_support = true;

	pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
	msm_host->regs_restore.is_supported =
		of_property_read_bool(np, "qcom,restore-after-cx-collapse");

	return pdata;
out:
	return NULL;
}
1993
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301994/* Returns required bandwidth in Bytes per Sec */
1995static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1996 struct mmc_ios *ios)
1997{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301998 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1999 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2000
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302001 unsigned int bw;
2002
Sahitya Tummala2886c922013-04-03 18:03:31 +05302003 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302004 /*
2005 * For DDR mode, SDCC controller clock will be at
2006 * the double rate than the actual clock that goes to card.
2007 */
2008 if (ios->bus_width == MMC_BUS_WIDTH_4)
2009 bw /= 2;
2010 else if (ios->bus_width == MMC_BUS_WIDTH_1)
2011 bw /= 8;
2012
2013 return bw;
2014}
2015
2016static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
2017 unsigned int bw)
2018{
2019 unsigned int *table = host->pdata->voting_data->bw_vecs;
2020 unsigned int size = host->pdata->voting_data->bw_vecs_size;
2021 int i;
2022
2023 if (host->msm_bus_vote.is_max_bw_needed && bw)
2024 return host->msm_bus_vote.max_bw_vote;
2025
2026 for (i = 0; i < size; i++) {
2027 if (bw <= table[i])
2028 break;
2029 }
2030
2031 if (i && (i == size))
2032 i--;
2033
2034 return i;
2035}
2036
/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 *
 * Applies @vote via the msm bus-scaling client if it differs from the
 * current vote. @flags are the caller's spin_lock_irqsave() flags;
 * host->lock is dropped around the update call and re-acquired
 * afterwards, so the caller must not rely on state protected by the
 * lock surviving across this call. Returns 0 or the bus-scaling error.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		/*
		 * The lock is released around the update request —
		 * presumably because the bus-scaling call may sleep
		 * (NOTE(review): confirm against msm_bus API).
		 */
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* Only cache the vote once it was actually applied. */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
2067
2068/*
2069 * Internal work. Work to set 0 bandwidth for msm bus.
2070 */
2071static void sdhci_msm_bus_work(struct work_struct *work)
2072{
2073 struct sdhci_msm_host *msm_host;
2074 struct sdhci_host *host;
2075 unsigned long flags;
2076
2077 msm_host = container_of(work, struct sdhci_msm_host,
2078 msm_bus_vote.vote_work.work);
2079 host = platform_get_drvdata(msm_host->pdev);
2080
2081 if (!msm_host->msm_bus_vote.client_handle)
2082 return;
2083
2084 spin_lock_irqsave(&host->lock, flags);
2085 /* don't vote for 0 bandwidth if any request is in progress */
2086 if (!host->mrq) {
2087 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302088 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302089 } else
2090 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
2091 mmc_hostname(host->mmc), __func__);
2092 spin_unlock_irqrestore(&host->lock, flags);
2093}
2094
/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/* Flush any pending deferred zero-bandwidth vote first. */
	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	/* Note: sdhci_msm_bus_set_vote() drops and re-takes host->lock. */
	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
	spin_unlock_irqrestore(&host->lock, flags);
}
2113
#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */

/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	/* Skip queueing when the minimum-bandwidth vote is already active. */
	if (msm_host->msm_bus_vote.min_bw_vote !=
		msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_wq,
				   &msm_host->msm_bus_vote.vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}
2131
/*
 * Register an MSM bus-scaling client for this controller based on DT
 * data ("qcom,bus-bw-vectors-bps" plus the msm_bus platform data) and
 * cache the vote indices for minimum (0) and maximum (UINT_MAX)
 * bandwidth. When no voting data exists, bus scaling is simply left
 * disabled and 0 is returned.
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	/* Register only when every piece of voting data is present. */
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* No usable voting data: release the scratch allocation. */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
2186
2187static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2188{
2189 if (host->msm_bus_vote.client_handle)
2190 msm_bus_scale_unregister_client(
2191 host->msm_bus_vote.client_handle);
2192}
2193
/*
 * Vote for bus bandwidth matching the current ios (on enable) or
 * schedule/apply the removal of the vote (on disable). A no-op when no
 * bus-scaling client is registered.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
2222
Asutosh Das0ef24812012-12-18 16:14:02 +05302223/* Regulator utility functions */
2224static int sdhci_msm_vreg_init_reg(struct device *dev,
2225 struct sdhci_msm_reg_data *vreg)
2226{
2227 int ret = 0;
2228
2229 /* check if regulator is already initialized? */
2230 if (vreg->reg)
2231 goto out;
2232
2233 /* Get the regulator handle */
2234 vreg->reg = devm_regulator_get(dev, vreg->name);
2235 if (IS_ERR(vreg->reg)) {
2236 ret = PTR_ERR(vreg->reg);
2237 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2238 __func__, vreg->name, ret);
2239 goto out;
2240 }
2241
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302242 if (regulator_count_voltages(vreg->reg) > 0) {
2243 vreg->set_voltage_sup = true;
2244 /* sanity check */
2245 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2246 pr_err("%s: %s invalid constraints specified\n",
2247 __func__, vreg->name);
2248 ret = -EINVAL;
2249 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302250 }
2251
2252out:
2253 return ret;
2254}
2255
/* Release a regulator handle obtained by sdhci_msm_vreg_init_reg(). */
static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
{
	if (vreg->reg)
		devm_regulator_put(vreg->reg);
}
2261
2262static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2263 *vreg, int uA_load)
2264{
2265 int ret = 0;
2266
2267 /*
2268 * regulators that do not support regulator_set_voltage also
2269 * do not support regulator_set_optimum_mode
2270 */
2271 if (vreg->set_voltage_sup) {
2272 ret = regulator_set_load(vreg->reg, uA_load);
2273 if (ret < 0)
2274 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2275 __func__, vreg->name, uA_load, ret);
2276 else
2277 /*
2278 * regulator_set_load() can return non zero
2279 * value even for success case.
2280 */
2281 ret = 0;
2282 }
2283 return ret;
2284}
2285
2286static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2287 int min_uV, int max_uV)
2288{
2289 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302290 if (vreg->set_voltage_sup) {
2291 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2292 if (ret) {
2293 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302294 __func__, vreg->name, min_uV, max_uV, ret);
2295 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302296 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302297
2298 return ret;
2299}
2300
/*
 * Power up a regulator: vote the high-power load, program the operating
 * voltage (only when the regulator was previously disabled) and enable
 * it. Returns 0 on success or a negative error code.
 */
static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Put regulator in HPM (high power mode) */
	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
	if (ret < 0)
		return ret;

	if (!vreg->is_enabled) {
		/* Set voltage level */
		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
						vreg->high_vol_level);
		if (ret)
			return ret;
	}
	ret = regulator_enable(vreg->reg);
	if (ret) {
		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
		return ret;
	}
	vreg->is_enabled = true;
	return ret;
}
2326
/*
 * Power down a regulator, honoring the always-on flag: a normal
 * regulator is disabled, its load vote dropped and its voltage floor
 * released; an always-on regulator is only demoted to low-power mode
 * (when it supports it). Returns 0 on success or a negative error code.
 */
static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
{
	int ret = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->is_always_on) {
		ret = regulator_disable(vreg->reg);
		if (ret) {
			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
		vreg->is_enabled = false;

		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
		if (ret < 0)
			goto out;

		/* Set min. voltage level to 0 */
		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (ret)
			goto out;
	} else if (vreg->is_enabled && vreg->is_always_on) {
		if (vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
							      vreg->lpm_uA);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret;
}
2361
/*
 * Enable or disable both slot supplies, vdd then vdd-io, in order.
 * @enable selects the direction; @is_init is unused in this function.
 * A missing vreg_data is not an error (slot assumed powered by an
 * always-on domain). Stops at and returns the first error, 0 otherwise.
 */
static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
			bool enable, bool is_init)
{
	int ret = 0, i;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *vreg_table[2];

	curr_slot = pdata->vreg_data;
	if (!curr_slot) {
		pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
			__func__);
		goto out;
	}

	vreg_table[0] = curr_slot->vdd_data;
	vreg_table[1] = curr_slot->vdd_io_data;

	for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
		if (vreg_table[i]) {
			if (enable)
				ret = sdhci_msm_vreg_enable(vreg_table[i]);
			else
				ret = sdhci_msm_vreg_disable(vreg_table[i]);
			if (ret)
				goto out;
		}
	}
out:
	return ret;
}
2392
Asutosh Das0ef24812012-12-18 16:14:02 +05302393/* This init function should be called only once for each SDHC slot */
2394static int sdhci_msm_vreg_init(struct device *dev,
2395 struct sdhci_msm_pltfm_data *pdata,
2396 bool is_init)
2397{
2398 int ret = 0;
2399 struct sdhci_msm_slot_reg_data *curr_slot;
2400 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2401
2402 curr_slot = pdata->vreg_data;
2403 if (!curr_slot)
2404 goto out;
2405
2406 curr_vdd_reg = curr_slot->vdd_data;
2407 curr_vdd_io_reg = curr_slot->vdd_io_data;
2408
2409 if (!is_init)
2410 /* Deregister all regulators from regulator framework */
2411 goto vdd_io_reg_deinit;
2412
2413 /*
2414 * Get the regulator handle from voltage regulator framework
2415 * and then try to set the voltage level for the regulator
2416 */
2417 if (curr_vdd_reg) {
2418 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2419 if (ret)
2420 goto out;
2421 }
2422 if (curr_vdd_io_reg) {
2423 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2424 if (ret)
2425 goto vdd_reg_deinit;
2426 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302427
Asutosh Das0ef24812012-12-18 16:14:02 +05302428 if (ret)
2429 dev_err(dev, "vreg reset failed (%d)\n", ret);
2430 goto out;
2431
2432vdd_io_reg_deinit:
2433 if (curr_vdd_io_reg)
2434 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2435vdd_reg_deinit:
2436 if (curr_vdd_reg)
2437 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2438out:
2439 return ret;
2440}
2441
2442
2443static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2444 enum vdd_io_level level,
2445 unsigned int voltage_level)
2446{
2447 int ret = 0;
2448 int set_level;
2449 struct sdhci_msm_reg_data *vdd_io_reg;
2450
2451 if (!pdata->vreg_data)
2452 return ret;
2453
2454 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2455 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2456 switch (level) {
2457 case VDD_IO_LOW:
2458 set_level = vdd_io_reg->low_vol_level;
2459 break;
2460 case VDD_IO_HIGH:
2461 set_level = vdd_io_reg->high_vol_level;
2462 break;
2463 case VDD_IO_SET_LEVEL:
2464 set_level = voltage_level;
2465 break;
2466 default:
2467 pr_err("%s: invalid argument level = %d",
2468 __func__, level);
2469 ret = -EINVAL;
2470 return ret;
2471 }
2472 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2473 set_level);
2474 }
2475 return ret;
2476}
2477
/*
 * Acquire spin-lock host->lock before calling this function
 */
static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
		bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/*
	 * Only toggle the wakeup IRQ on an actual state change so the
	 * enable_irq()/disable_irq() calls stay balanced; a redundant
	 * request is reported instead of acted on.
	 */
	if (enable && !msm_host->is_sdiowakeup_enabled)
		enable_irq(msm_host->pdata->sdiowakeup_irq);
	else if (!enable && msm_host->is_sdiowakeup_enabled)
		disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
	else
		dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
			__func__, enable, msm_host->is_sdiowakeup_enabled);
	msm_host->is_sdiowakeup_enabled = enable;
}
2496
2497static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2498{
2499 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302500 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2501 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2502
Ritesh Harjani42876f42015-11-17 17:46:51 +05302503 unsigned long flags;
2504
2505 pr_debug("%s: irq (%d) received\n", __func__, irq);
2506
2507 spin_lock_irqsave(&host->lock, flags);
2508 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2509 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302510 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302511
2512 return IRQ_HANDLED;
2513}
2514
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302515void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2516{
2517 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2518 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302519 const struct sdhci_msm_offset *msm_host_offset =
2520 msm_host->offset;
Siba Prasad0196fe42017-06-27 15:13:27 +05302521 unsigned int irq_flags = 0;
2522 struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302523
Siba Prasad0196fe42017-06-27 15:13:27 +05302524 if (pwr_irq_desc)
2525 irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
2526 state_use_accessors);
2527
2528 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302529 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302530 sdhci_msm_readl_relaxed(host,
2531 msm_host_offset->CORE_PWRCTL_STATUS),
2532 sdhci_msm_readl_relaxed(host,
2533 msm_host_offset->CORE_PWRCTL_MASK),
2534 sdhci_msm_readl_relaxed(host,
Siba Prasad0196fe42017-06-27 15:13:27 +05302535 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
2536
2537 MMC_TRACE(host->mmc,
2538 "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
2539 __func__,
2540 sdhci_msm_readb_relaxed(host,
2541 msm_host_offset->CORE_PWRCTL_STATUS),
2542 sdhci_msm_readb_relaxed(host,
2543 msm_host_offset->CORE_PWRCTL_MASK),
2544 sdhci_msm_readb_relaxed(host,
2545 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302546}
2547
Asutosh Das0ef24812012-12-18 16:14:02 +05302548static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2549{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002550 struct sdhci_host *host = (struct sdhci_host *)data;
2551 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2552 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302553 const struct sdhci_msm_offset *msm_host_offset =
2554 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302555 u8 irq_status = 0;
2556 u8 irq_ack = 0;
2557 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302558 int pwr_state = 0, io_level = 0;
2559 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302560 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302561
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302562 irq_status = sdhci_msm_readb_relaxed(host,
2563 msm_host_offset->CORE_PWRCTL_STATUS);
2564
Asutosh Das0ef24812012-12-18 16:14:02 +05302565 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2566 mmc_hostname(msm_host->mmc), irq, irq_status);
2567
2568 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302569 sdhci_msm_writeb_relaxed(irq_status, host,
2570 msm_host_offset->CORE_PWRCTL_CLEAR);
2571
Asutosh Das0ef24812012-12-18 16:14:02 +05302572 /*
2573 * SDHC has core_mem and hc_mem device memory and these memory
2574 * addresses do not fall within 1KB region. Hence, any update to
2575 * core_mem address space would require an mb() to ensure this gets
2576 * completed before its next update to registers within hc_mem.
2577 */
2578 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302579 /*
2580 * There is a rare HW scenario where the first clear pulse could be
2581 * lost when actual reset and clear/read of status register is
2582 * happening at a time. Hence, retry for at least 10 times to make
2583 * sure status register is cleared. Otherwise, this will result in
2584 * a spurious power IRQ resulting in system instability.
2585 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302586 while (irq_status & sdhci_msm_readb_relaxed(host,
2587 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302588 if (retry == 0) {
2589 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2590 mmc_hostname(host->mmc), irq_status);
2591 sdhci_msm_dump_pwr_ctrl_regs(host);
2592 BUG_ON(1);
2593 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302594 sdhci_msm_writeb_relaxed(irq_status, host,
2595 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302596 retry--;
2597 udelay(10);
2598 }
2599 if (likely(retry < 10))
2600 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2601 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302602
2603 /* Handle BUS ON/OFF*/
2604 if (irq_status & CORE_PWRCTL_BUS_ON) {
2605 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302606 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302607 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302608 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2609 VDD_IO_HIGH, 0);
2610 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302611 if (ret)
2612 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2613 else
2614 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302615
2616 pwr_state = REQ_BUS_ON;
2617 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302618 }
2619 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302620 if (msm_host->pltfm_init_done)
2621 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2622 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302623 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302624 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302625 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2626 VDD_IO_LOW, 0);
2627 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302628 if (ret)
2629 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2630 else
2631 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302632
2633 pwr_state = REQ_BUS_OFF;
2634 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302635 }
2636 /* Handle IO LOW/HIGH */
2637 if (irq_status & CORE_PWRCTL_IO_LOW) {
2638 /* Switch voltage Low */
2639 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2640 if (ret)
2641 irq_ack |= CORE_PWRCTL_IO_FAIL;
2642 else
2643 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302644
2645 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302646 }
2647 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2648 /* Switch voltage High */
2649 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2650 if (ret)
2651 irq_ack |= CORE_PWRCTL_IO_FAIL;
2652 else
2653 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302654
2655 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302656 }
2657
2658 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302659 sdhci_msm_writeb_relaxed(irq_ack, host,
2660 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302661 /*
2662 * SDHC has core_mem and hc_mem device memory and these memory
2663 * addresses do not fall within 1KB region. Hence, any update to
2664 * core_mem address space would require an mb() to ensure this gets
2665 * completed before its next update to registers within hc_mem.
2666 */
2667 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302668 if ((io_level & REQ_IO_HIGH) &&
2669 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2670 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302671 writel_relaxed((readl_relaxed(host->ioaddr +
2672 msm_host_offset->CORE_VENDOR_SPEC) &
2673 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2674 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002675 else if ((io_level & REQ_IO_LOW) ||
2676 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302677 writel_relaxed((readl_relaxed(host->ioaddr +
2678 msm_host_offset->CORE_VENDOR_SPEC) |
2679 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2680 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002681 mb();
2682
Asutosh Das0ef24812012-12-18 16:14:02 +05302683 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2684 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302685 spin_lock_irqsave(&host->lock, flags);
2686 if (pwr_state)
2687 msm_host->curr_pwr_state = pwr_state;
2688 if (io_level)
2689 msm_host->curr_io_level = io_level;
2690 complete(&msm_host->pwr_irq_completion);
2691 spin_unlock_irqrestore(&host->lock, flags);
2692
Asutosh Das0ef24812012-12-18 16:14:02 +05302693 return IRQ_HANDLED;
2694}
2695
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302696static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302697show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2698{
2699 struct sdhci_host *host = dev_get_drvdata(dev);
2700 int poll;
2701 unsigned long flags;
2702
2703 spin_lock_irqsave(&host->lock, flags);
2704 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2705 spin_unlock_irqrestore(&host->lock, flags);
2706
2707 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2708}
2709
2710static ssize_t
2711store_polling(struct device *dev, struct device_attribute *attr,
2712 const char *buf, size_t count)
2713{
2714 struct sdhci_host *host = dev_get_drvdata(dev);
2715 int value;
2716 unsigned long flags;
2717
2718 if (!kstrtou32(buf, 0, &value)) {
2719 spin_lock_irqsave(&host->lock, flags);
2720 if (value) {
2721 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2722 mmc_detect_change(host->mmc, 0);
2723 } else {
2724 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2725 }
2726 spin_unlock_irqrestore(&host->lock, flags);
2727 }
2728 return count;
2729}
2730
2731static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302732show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2733 char *buf)
2734{
2735 struct sdhci_host *host = dev_get_drvdata(dev);
2736 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2737 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2738
2739 return snprintf(buf, PAGE_SIZE, "%u\n",
2740 msm_host->msm_bus_vote.is_max_bw_needed);
2741}
2742
2743static ssize_t
2744store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2745 const char *buf, size_t count)
2746{
2747 struct sdhci_host *host = dev_get_drvdata(dev);
2748 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2749 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2750 uint32_t value;
2751 unsigned long flags;
2752
2753 if (!kstrtou32(buf, 0, &value)) {
2754 spin_lock_irqsave(&host->lock, flags);
2755 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2756 spin_unlock_irqrestore(&host->lock, flags);
2757 }
2758 return count;
2759}
2760
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302761static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302762{
2763 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2764 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302765 const struct sdhci_msm_offset *msm_host_offset =
2766 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302767 unsigned long flags;
2768 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302769 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302770
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302771 spin_lock_irqsave(&host->lock, flags);
2772 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2773 mmc_hostname(host->mmc), __func__, req_type,
2774 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302775 if (!msm_host->mci_removed)
2776 io_sig_sts = sdhci_msm_readl_relaxed(host,
2777 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302778
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302779 /*
2780 * The IRQ for request type IO High/Low will be generated when -
2781 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2782 * 2. If 1 is true and when there is a state change in 1.8V enable
2783 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2784 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2785 * layer tries to set it to 3.3V before card detection happens, the
2786 * IRQ doesn't get triggered as there is no state change in this bit.
2787 * The driver already handles this case by changing the IO voltage
2788 * level to high as part of controller power up sequence. Hence, check
2789 * for host->pwr to handle a case where IO voltage high request is
2790 * issued even before controller power up.
2791 */
2792 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2793 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2794 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2795 pr_debug("%s: do not wait for power IRQ that never comes\n",
2796 mmc_hostname(host->mmc));
2797 spin_unlock_irqrestore(&host->lock, flags);
2798 return;
2799 }
2800 }
2801
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302802 if ((req_type & msm_host->curr_pwr_state) ||
2803 (req_type & msm_host->curr_io_level))
2804 done = true;
2805 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302806
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302807 /*
2808 * This is needed here to hanlde a case where IRQ gets
2809 * triggered even before this function is called so that
2810 * x->done counter of completion gets reset. Otherwise,
2811 * next call to wait_for_completion returns immediately
2812 * without actually waiting for the IRQ to be handled.
2813 */
2814 if (done)
2815 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302816 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
Siba Prasad0196fe42017-06-27 15:13:27 +05302817 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
Ritesh Harjani82124772014-11-04 15:34:00 +05302818 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2819 mmc_hostname(host->mmc), req_type);
Siba Prasad0196fe42017-06-27 15:13:27 +05302820 MMC_TRACE(host->mmc,
2821 "%s: request(%d) timed out waiting for pwr_irq\n",
2822 __func__, req_type);
2823 sdhci_msm_dump_pwr_ctrl_regs(host);
2824 }
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302825 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2826 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302827}
2828
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002829static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2830{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302831 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2832 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2833 const struct sdhci_msm_offset *msm_host_offset =
2834 msm_host->offset;
2835 u32 config = readl_relaxed(host->ioaddr +
2836 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302837
2838 if (enable) {
2839 config |= CORE_CDR_EN;
2840 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302841 writel_relaxed(config, host->ioaddr +
2842 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302843 } else {
2844 config &= ~CORE_CDR_EN;
2845 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302846 writel_relaxed(config, host->ioaddr +
2847 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302848 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002849}
2850
Asutosh Das648f9d12013-01-10 21:11:04 +05302851static unsigned int sdhci_msm_max_segs(void)
2852{
2853 return SDHCI_MSM_MAX_SEGMENTS;
2854}
2855
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302856static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302857{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302858 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2859 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302860
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302861 return msm_host->pdata->sup_clk_table[0];
2862}
2863
2864static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2865{
2866 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2867 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2868 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2869
2870 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2871}
2872
2873static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2874 u32 req_clk)
2875{
2876 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2877 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2878 unsigned int sel_clk = -1;
2879 unsigned char cnt;
2880
2881 if (req_clk < sdhci_msm_get_min_clock(host)) {
2882 sel_clk = sdhci_msm_get_min_clock(host);
2883 return sel_clk;
2884 }
2885
2886 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2887 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2888 break;
2889 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2890 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2891 break;
2892 } else {
2893 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2894 }
2895 }
2896 return sel_clk;
2897}
2898
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002899static void sdhci_msm_registers_save(struct sdhci_host *host)
2900{
2901 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2902 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2903 const struct sdhci_msm_offset *msm_host_offset =
2904 msm_host->offset;
2905
2906 if (!msm_host->regs_restore.is_supported)
2907 return;
2908
2909 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
2910 msm_host_offset->CORE_VENDOR_SPEC);
2911 msm_host->regs_restore.vendor_pwrctl_mask =
2912 readl_relaxed(host->ioaddr +
2913 msm_host_offset->CORE_PWRCTL_MASK);
2914 msm_host->regs_restore.vendor_func2 =
2915 readl_relaxed(host->ioaddr +
2916 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
2917 msm_host->regs_restore.vendor_func3 =
2918 readl_relaxed(host->ioaddr +
2919 msm_host_offset->CORE_VENDOR_SPEC3);
2920 msm_host->regs_restore.hc_2c_2e =
2921 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
2922 msm_host->regs_restore.hc_3c_3e =
2923 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
2924 msm_host->regs_restore.vendor_pwrctl_ctl =
2925 readl_relaxed(host->ioaddr +
2926 msm_host_offset->CORE_PWRCTL_CTL);
2927 msm_host->regs_restore.hc_38_3a =
2928 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
2929 msm_host->regs_restore.hc_34_36 =
2930 sdhci_readl(host, SDHCI_INT_ENABLE);
2931 msm_host->regs_restore.hc_28_2a =
2932 sdhci_readl(host, SDHCI_HOST_CONTROL);
2933 msm_host->regs_restore.vendor_caps_0 =
2934 readl_relaxed(host->ioaddr +
2935 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
2936 msm_host->regs_restore.hc_caps_1 =
2937 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2938 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
2939 msm_host_offset->CORE_TESTBUS_CONFIG);
2940 msm_host->regs_restore.is_valid = true;
2941
2942 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
2943 mmc_hostname(host->mmc), __func__,
2944 readl_relaxed(host->ioaddr +
2945 msm_host_offset->CORE_PWRCTL_MASK));
2946}
2947
/*
 * Reprogram the registers captured by sdhci_msm_registers_save() after
 * the controller clocks come back on.  No-op unless save/restore is
 * supported and a valid snapshot exists; the snapshot is invalidated
 * once consumed.
 */
static void sdhci_msm_registers_restore(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
			msm_host->offset;

	if (!msm_host->regs_restore.is_supported ||
		!msm_host->regs_restore.is_valid)
		return;

	writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
			host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
	writel_relaxed(msm_host->regs_restore.vendor_func2,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	writel_relaxed(msm_host->regs_restore.vendor_func3,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
			SDHCI_CLOCK_CONTROL);
	sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
			SDHCI_AUTO_CMD_ERR);
	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
			host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
	sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
			SDHCI_SIGNAL_ENABLE);
	sdhci_writel(host, msm_host->regs_restore.hc_34_36,
			SDHCI_INT_ENABLE);
	sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
			SDHCI_HOST_CONTROL);
	writel_relaxed(msm_host->regs_restore.vendor_caps_0,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
			SDHCI_CAPABILITIES_1);
	writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
			msm_host_offset->CORE_TESTBUS_CONFIG);
	/* Snapshot consumed; a fresh save is required before next restore */
	msm_host->regs_restore.is_valid = false;

	pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
		mmc_hostname(host->mmc), __func__,
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_PWRCTL_MASK));
}
2995
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302996static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2997{
2998 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2999 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3000 int rc = 0;
3001
3002 if (atomic_read(&msm_host->controller_clock))
3003 return 0;
3004
3005 sdhci_msm_bus_voting(host, 1);
3006
3007 if (!IS_ERR(msm_host->pclk)) {
3008 rc = clk_prepare_enable(msm_host->pclk);
3009 if (rc) {
3010 pr_err("%s: %s: failed to enable the pclk with error %d\n",
3011 mmc_hostname(host->mmc), __func__, rc);
3012 goto remove_vote;
3013 }
3014 }
3015
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303016 if (!IS_ERR(msm_host->bus_aggr_clk)) {
3017 rc = clk_prepare_enable(msm_host->bus_aggr_clk);
3018 if (rc) {
3019 pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
3020 mmc_hostname(host->mmc), __func__, rc);
3021 goto disable_pclk;
3022 }
3023 }
3024
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303025 rc = clk_prepare_enable(msm_host->clk);
3026 if (rc) {
3027 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
3028 mmc_hostname(host->mmc), __func__, rc);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303029 goto disable_bus_aggr_clk;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303030 }
3031
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303032 if (!IS_ERR(msm_host->ice_clk)) {
3033 rc = clk_prepare_enable(msm_host->ice_clk);
3034 if (rc) {
3035 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
3036 mmc_hostname(host->mmc), __func__, rc);
3037 goto disable_host_clk;
3038 }
3039 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303040 atomic_set(&msm_host->controller_clock, 1);
3041 pr_debug("%s: %s: enabled controller clock\n",
3042 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003043 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303044 goto out;
3045
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303046disable_host_clk:
3047 if (!IS_ERR(msm_host->clk))
3048 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303049disable_bus_aggr_clk:
3050 if (!IS_ERR(msm_host->bus_aggr_clk))
3051 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303052disable_pclk:
3053 if (!IS_ERR(msm_host->pclk))
3054 clk_disable_unprepare(msm_host->pclk);
3055remove_vote:
3056 if (msm_host->msm_bus_vote.client_handle)
3057 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3058out:
3059 return rc;
3060}
3061
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303062static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
3063{
3064 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3065 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303066
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303067 if (atomic_read(&msm_host->controller_clock)) {
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003068 sdhci_msm_registers_save(host);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303069 if (!IS_ERR(msm_host->clk))
3070 clk_disable_unprepare(msm_host->clk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303071 if (!IS_ERR(msm_host->ice_clk))
3072 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303073 if (!IS_ERR(msm_host->bus_aggr_clk))
3074 clk_disable_unprepare(msm_host->bus_aggr_clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303075 if (!IS_ERR(msm_host->pclk))
3076 clk_disable_unprepare(msm_host->pclk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303077 sdhci_msm_bus_voting(host, 0);
3078 atomic_set(&msm_host->controller_clock, 0);
3079 pr_debug("%s: %s: disabled controller clock\n",
3080 mmc_hostname(host->mmc), __func__);
3081 }
3082}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303083
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303084static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
3085{
3086 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3087 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3088 int rc = 0;
3089
3090 if (enable && !atomic_read(&msm_host->clks_on)) {
3091 pr_debug("%s: request to enable clocks\n",
3092 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303093
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303094 /*
3095 * The bus-width or the clock rate might have changed
3096 * after controller clocks are enbaled, update bus vote
3097 * in such case.
3098 */
3099 if (atomic_read(&msm_host->controller_clock))
3100 sdhci_msm_bus_voting(host, 1);
3101
3102 rc = sdhci_msm_enable_controller_clock(host);
3103 if (rc)
3104 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303105
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303106 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3107 rc = clk_prepare_enable(msm_host->bus_clk);
3108 if (rc) {
3109 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
3110 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303111 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303112 }
3113 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003114 if (!IS_ERR(msm_host->ff_clk)) {
3115 rc = clk_prepare_enable(msm_host->ff_clk);
3116 if (rc) {
3117 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
3118 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303119 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003120 }
3121 }
3122 if (!IS_ERR(msm_host->sleep_clk)) {
3123 rc = clk_prepare_enable(msm_host->sleep_clk);
3124 if (rc) {
3125 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
3126 mmc_hostname(host->mmc), __func__, rc);
3127 goto disable_ff_clk;
3128 }
3129 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303130 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303131
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303132 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303133 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
3134 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05303135 /*
3136 * During 1.8V signal switching the clock source must
3137 * still be ON as it requires accessing SDHC
3138 * registers (SDHCi host control2 register bit 3 must
3139 * be written and polled after stopping the SDCLK).
3140 */
3141 if (host->mmc->card_clock_off)
3142 return 0;
3143 pr_debug("%s: request to disable clocks\n",
3144 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003145 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
3146 clk_disable_unprepare(msm_host->sleep_clk);
3147 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3148 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303149 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3150 clk_disable_unprepare(msm_host->bus_clk);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003151 sdhci_msm_disable_controller_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303152 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303153 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303154 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003155disable_ff_clk:
3156 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3157 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303158disable_bus_clk:
3159 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3160 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303161disable_controller_clk:
3162 if (!IS_ERR_OR_NULL(msm_host->clk))
3163 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303164 if (!IS_ERR(msm_host->ice_clk))
3165 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303166 if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
3167 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303168 if (!IS_ERR_OR_NULL(msm_host->pclk))
3169 clk_disable_unprepare(msm_host->pclk);
3170 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303171remove_vote:
3172 if (msm_host->msm_bus_vote.client_handle)
3173 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303174out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303175 return rc;
3176}
3177
/*
 * sdhci_msm_set_clock - set the SD/MMC bus clock for the MSM controller.
 * @host: SDHCI host instance
 * @clock: requested card clock rate in Hz (0 means gate the clocks off)
 *
 * Handles MSM-specific clocking: enabling/disabling the bulk clocks,
 * the CLK_PWRSAVE auto-gating bit, the doubled internal clock needed for
 * DDR timings, the HS400 MCLK/divided-clock mux plus HC_SELECT_IN mode
 * override, and the bus-bandwidth vote on rate changes.  Always finishes
 * by programming the standard SDHCI clock registers via sdhci_set_clock().
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_card *card = host->mmc->card;
	struct mmc_ios	curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		/* clock == 0: gate all host clocks and record the new rate */
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	/* Current state of the auto clock-gating (power save) bit */
	curr_pwrsave = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	/*
	 * Re-enable pwrsave once past initialization rates, but only for
	 * cards whose clock the core layer allows to be gated.
	 */
	if ((clock > 400000) &&
	    !curr_pwrsave && card && mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_HC_MCLK_SEL_MASK)
			| CORE_HC_MCLK_SEL_HS400), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(card && mmc_card_strobe(card) &&
			msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
				msm_host_offset->CORE_VENDOR_SPEC)
				| CORE_HC_SELECT_IN_HS400
				| CORE_HC_SELECT_IN_EN), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr +
					msm_host_offset->CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					msm_host_offset->CORE_VENDOR_SPEC3)
					& ~CORE_PWRSAVE_DLL), host->ioaddr +
					msm_host_offset->CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	}
	/* Ensure all the above relaxed register writes have completed */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
3350
Sahitya Tummala14613432013-03-21 11:13:25 +05303351static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3352 unsigned int uhs)
3353{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003354 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3355 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303356 const struct sdhci_msm_offset *msm_host_offset =
3357 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303358 u16 ctrl_2;
3359
3360 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3361 /* Select Bus Speed Mode for host */
3362 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003363 if ((uhs == MMC_TIMING_MMC_HS400) ||
3364 (uhs == MMC_TIMING_MMC_HS200) ||
3365 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303366 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3367 else if (uhs == MMC_TIMING_UHS_SDR12)
3368 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3369 else if (uhs == MMC_TIMING_UHS_SDR25)
3370 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3371 else if (uhs == MMC_TIMING_UHS_SDR50)
3372 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003373 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3374 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303375 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303376 /*
3377 * When clock frquency is less than 100MHz, the feedback clock must be
3378 * provided and DLL must not be used so that tuning can be skipped. To
3379 * provide feedback clock, the mode selection can be any value less
3380 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3381 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003382 if (host->clock <= CORE_FREQ_100MHZ) {
3383 if ((uhs == MMC_TIMING_MMC_HS400) ||
3384 (uhs == MMC_TIMING_MMC_HS200) ||
3385 (uhs == MMC_TIMING_UHS_SDR104))
3386 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303387
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003388 /*
3389 * Make sure DLL is disabled when not required
3390 *
3391 * Write 1 to DLL_RST bit of DLL_CONFIG register
3392 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303393 writel_relaxed((readl_relaxed(host->ioaddr +
3394 msm_host_offset->CORE_DLL_CONFIG)
3395 | CORE_DLL_RST), host->ioaddr +
3396 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003397
3398 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303399 writel_relaxed((readl_relaxed(host->ioaddr +
3400 msm_host_offset->CORE_DLL_CONFIG)
3401 | CORE_DLL_PDN), host->ioaddr +
3402 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003403 mb();
3404
3405 /*
3406 * The DLL needs to be restored and CDCLP533 recalibrated
3407 * when the clock frequency is set back to 400MHz.
3408 */
3409 msm_host->calibration_done = false;
3410 }
3411
3412 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3413 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303414 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3415
3416}
3417
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003418#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003419#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303420static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003421{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303422 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303423 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3424 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303425 const struct sdhci_msm_offset *msm_host_offset =
3426 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303427 struct cmdq_host *cq_host = host->cq_host;
3428
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303429 u32 version = sdhci_msm_readl_relaxed(host,
3430 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003431 u16 minor = version & CORE_VERSION_TARGET_MASK;
3432 /* registers offset changed starting from 4.2.0 */
3433 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3434
Sayali Lokhande6e7e6d52017-01-04 12:00:35 +05303435 if (cq_host->offset_changed)
3436 offset += CQ_V5_VENDOR_CFG;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003437 pr_err("---- Debug RAM dump ----\n");
3438 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3439 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3440 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3441
3442 while (i < 16) {
3443 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3444 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3445 i++;
3446 }
3447 pr_err("-------------------------\n");
3448}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303449
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303450static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3451{
3452 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3453 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3454 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3455
3456 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3457 sizeof(struct mmc_host));
3458 if (msm_host->mmc->card)
3459 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3460 sizeof(struct mmc_card));
3461 memcpy(&cached_data->copy_host, host,
3462 sizeof(struct sdhci_host));
3463}
3464
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303465void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3466{
3467 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3468 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303469 const struct sdhci_msm_offset *msm_host_offset =
3470 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303471 int tbsel, tbsel2;
3472 int i, index = 0;
3473 u32 test_bus_val = 0;
3474 u32 debug_reg[MAX_TEST_BUS] = {0};
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303475 u32 sts = 0;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303476
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303477 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303478 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003479 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303480 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003481
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303482 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3483 sdhci_msm_readl_relaxed(host,
3484 msm_host_offset->CORE_MCI_DATA_CNT),
3485 sdhci_msm_readl_relaxed(host,
3486 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303487 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303488 sdhci_msm_readl_relaxed(host,
3489 msm_host_offset->CORE_MCI_DATA_CNT),
3490 sdhci_msm_readl_relaxed(host,
3491 msm_host_offset->CORE_MCI_FIFO_CNT),
3492 sdhci_msm_readl_relaxed(host,
3493 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303494 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303495 readl_relaxed(host->ioaddr +
3496 msm_host_offset->CORE_DLL_CONFIG),
3497 readl_relaxed(host->ioaddr +
3498 msm_host_offset->CORE_DLL_STATUS),
3499 sdhci_msm_readl_relaxed(host,
3500 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303501 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303502 readl_relaxed(host->ioaddr +
3503 msm_host_offset->CORE_VENDOR_SPEC),
3504 readl_relaxed(host->ioaddr +
3505 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3506 readl_relaxed(host->ioaddr +
3507 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303508 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303509 readl_relaxed(host->ioaddr +
3510 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303511
3512 /*
3513 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3514 * of CORE_TESTBUS_CONFIG register.
3515 *
3516 * To select test bus 0 to 7 use tbsel and to select any test bus
3517 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
3518 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3519 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3520 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003521 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303522 for (tbsel = 0; tbsel < 8; tbsel++) {
3523 if (index >= MAX_TEST_BUS)
3524 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303525 test_bus_val =
3526 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3527 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3528 sdhci_msm_writel_relaxed(test_bus_val, host,
3529 msm_host_offset->CORE_TESTBUS_CONFIG);
3530 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3531 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303532 }
3533 }
3534 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3535 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3536 i, i + 3, debug_reg[i], debug_reg[i+1],
3537 debug_reg[i+2], debug_reg[i+3]);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303538 if (host->is_crypto_en) {
3539 sdhci_msm_ice_get_status(host, &sts);
3540 pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
Venkat Gopalakrishnan6324ee62015-10-22 17:53:30 -07003541 sdhci_msm_ice_print_regs(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303542 }
3543}
3544
3545static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3546{
3547 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3548 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3549
3550 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303551 if (msm_host->ice.pdev) {
3552 if (msm_host->ice_hci_support)
3553 writel_relaxed(1, host->ioaddr +
3554 HC_VENDOR_SPECIFIC_ICE_CTRL);
3555 else
3556 writel_relaxed(1,
3557 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3558 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303559
3560 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003561}
3562
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303563/*
3564 * sdhci_msm_enhanced_strobe_mask :-
3565 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3566 * SW should write 3 to
3567 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3568 * The default reset value of this register is 2.
3569 */
3570static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3571{
3572 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3573 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303574 const struct sdhci_msm_offset *msm_host_offset =
3575 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303576
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303577 if (!msm_host->enhanced_strobe ||
3578 !mmc_card_strobe(msm_host->mmc->card)) {
3579 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303580 mmc_hostname(host->mmc));
3581 return;
3582 }
3583
3584 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303585 writel_relaxed((readl_relaxed(host->ioaddr +
3586 msm_host_offset->CORE_VENDOR_SPEC3)
3587 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3588 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303589 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303590 writel_relaxed((readl_relaxed(host->ioaddr +
3591 msm_host_offset->CORE_VENDOR_SPEC3)
3592 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3593 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303594 }
3595}
3596
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003597static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3598{
3599 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3600 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303601 const struct sdhci_msm_offset *msm_host_offset =
3602 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003603
3604 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303605 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3606 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003607 } else {
3608 u32 value;
3609
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303610 value = sdhci_msm_readl_relaxed(host,
3611 msm_host_offset->CORE_TESTBUS_CONFIG);
3612 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3613 sdhci_msm_writel_relaxed(value, host,
3614 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003615 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303616}
3617
Pavan Anamula691dd592015-08-25 16:11:20 +05303618void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3619{
3620 u32 vendor_func2;
3621 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303622 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3623 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3624 const struct sdhci_msm_offset *msm_host_offset =
3625 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303626
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303627 vendor_func2 = readl_relaxed(host->ioaddr +
3628 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303629
3630 if (enable) {
3631 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303632 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303633 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303634 while (readl_relaxed(host->ioaddr +
3635 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303636 if (timeout == 0) {
3637 pr_info("%s: Applying wait idle disable workaround\n",
3638 mmc_hostname(host->mmc));
3639 /*
3640 * Apply the reset workaround to not wait for
3641 * pending data transfers on AXI before
3642 * resetting the controller. This could be
3643 * risky if the transfers were stuck on the
3644 * AXI bus.
3645 */
3646 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303647 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303648 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303649 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3650 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303651 host->reset_wa_t = ktime_get();
3652 return;
3653 }
3654 timeout--;
3655 udelay(10);
3656 }
3657 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3658 mmc_hostname(host->mmc));
3659 } else {
3660 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303661 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303662 }
3663}
3664
Gilad Broner44445992015-09-29 16:05:39 +03003665static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3666{
3667 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303668 container_of(work, struct sdhci_msm_pm_qos_irq,
3669 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003670
3671 if (atomic_read(&pm_qos_irq->counter))
3672 return;
3673
3674 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3675 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3676}
3677
/*
 * sdhci_msm_pm_qos_irq_vote - take a PM QoS vote for the host IRQ path.
 * @host: SDHCI host instance
 *
 * Increments the vote counter and, when this is the first vote or the
 * power policy's latency target has changed, cancels any pending async
 * unvote work and (re)applies the latency request.
 */
void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *latency =
		&msm_host->pdata->pm_qos_data.irq_latency;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
	/* Make sure to update the voting in case power policy has changed */
	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* Stop a queued async unvote from undoing this vote */
	cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
				msm_host->pm_qos_irq.latency);
}
3700
/*
 * sdhci_msm_pm_qos_irq_unvote - release one PM QoS vote for the IRQ path.
 * @host: SDHCI host instance
 * @async: when true, defer the actual unvote via delayed work
 *         (QOS_REMOVE_DELAY_MS) so back-to-back requests don't thrash
 *         the QoS request; when false, drop it immediately.
 *
 * Warns (and bails) on counter underflow.  Only the final unvote
 * (counter reaching zero) changes the QoS request.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	} else {
		/* Unbalanced unvote: flag the caller bug, don't underflow */
		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
		return;
	}

	/* Other voters remain; nothing to update yet */
	if (counter)
		return;

	if (async) {
		schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3730
Gilad Broner68c54562015-09-20 11:59:46 +03003731static ssize_t
3732sdhci_msm_pm_qos_irq_show(struct device *dev,
3733 struct device_attribute *attr, char *buf)
3734{
3735 struct sdhci_host *host = dev_get_drvdata(dev);
3736 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3737 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3738 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3739
3740 return snprintf(buf, PAGE_SIZE,
3741 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3742 irq->enabled, atomic_read(&irq->counter), irq->latency);
3743}
3744
3745static ssize_t
3746sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3747 struct device_attribute *attr, char *buf)
3748{
3749 struct sdhci_host *host = dev_get_drvdata(dev);
3750 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3751 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3752
3753 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3754}
3755
3756static ssize_t
3757sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3758 struct device_attribute *attr, const char *buf, size_t count)
3759{
3760 struct sdhci_host *host = dev_get_drvdata(dev);
3761 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3762 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3763 uint32_t value;
3764 bool enable;
3765 int ret;
3766
3767 ret = kstrtou32(buf, 0, &value);
3768 if (ret)
3769 goto out;
3770 enable = !!value;
3771
3772 if (enable == msm_host->pm_qos_irq.enabled)
3773 goto out;
3774
3775 msm_host->pm_qos_irq.enabled = enable;
3776 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303777 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003778 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3779 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3780 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3781 msm_host->pm_qos_irq.latency);
3782 }
3783
3784out:
3785 return count;
3786}
3787
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003788#ifdef CONFIG_SMP
3789static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3790 struct sdhci_host *host)
3791{
3792 msm_host->pm_qos_irq.req.irq = host->irq;
3793}
3794#else
3795static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3796 struct sdhci_host *host) { }
3797#endif
3798
Gilad Broner44445992015-09-29 16:05:39 +03003799void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3800{
3801 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3802 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3803 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003804 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003805
3806 if (!msm_host->pdata->pm_qos_data.irq_valid)
3807 return;
3808
3809 /* Initialize only once as this gets called per partition */
3810 if (msm_host->pm_qos_irq.enabled)
3811 return;
3812
3813 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3814 msm_host->pm_qos_irq.req.type =
3815 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003816 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3817 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3818 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003819 else
3820 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3821 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3822
Asutosh Das36c2e922015-12-01 12:19:58 +05303823 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003824 sdhci_msm_pm_qos_irq_unvote_work);
3825 /* For initialization phase, set the performance latency */
3826 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3827 msm_host->pm_qos_irq.latency =
3828 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3829 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3830 msm_host->pm_qos_irq.latency);
3831 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003832
3833 /* sysfs */
3834 msm_host->pm_qos_irq.enable_attr.show =
3835 sdhci_msm_pm_qos_irq_enable_show;
3836 msm_host->pm_qos_irq.enable_attr.store =
3837 sdhci_msm_pm_qos_irq_enable_store;
3838 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3839 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3840 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3841 ret = device_create_file(&msm_host->pdev->dev,
3842 &msm_host->pm_qos_irq.enable_attr);
3843 if (ret)
3844 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3845 __func__, ret);
3846
3847 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3848 msm_host->pm_qos_irq.status_attr.store = NULL;
3849 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3850 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3851 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3852 ret = device_create_file(&msm_host->pdev->dev,
3853 &msm_host->pm_qos_irq.status_attr);
3854 if (ret)
3855 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3856 __func__, ret);
3857}
3858
3859static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3860 struct device_attribute *attr, char *buf)
3861{
3862 struct sdhci_host *host = dev_get_drvdata(dev);
3863 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3864 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3865 struct sdhci_msm_pm_qos_group *group;
3866 int i;
3867 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3868 int offset = 0;
3869
3870 for (i = 0; i < nr_groups; i++) {
3871 group = &msm_host->pm_qos[i];
3872 offset += snprintf(&buf[offset], PAGE_SIZE,
3873 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3874 i, group->req.cpus_affine.bits[0],
3875 msm_host->pm_qos_group_enable,
3876 atomic_read(&group->counter),
3877 group->latency);
3878 }
3879
3880 return offset;
3881}
3882
3883static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3884 struct device_attribute *attr, char *buf)
3885{
3886 struct sdhci_host *host = dev_get_drvdata(dev);
3887 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3888 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3889
3890 return snprintf(buf, PAGE_SIZE, "%s\n",
3891 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3892}
3893
3894static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3895 struct device_attribute *attr, const char *buf, size_t count)
3896{
3897 struct sdhci_host *host = dev_get_drvdata(dev);
3898 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3899 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3900 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3901 uint32_t value;
3902 bool enable;
3903 int ret;
3904 int i;
3905
3906 ret = kstrtou32(buf, 0, &value);
3907 if (ret)
3908 goto out;
3909 enable = !!value;
3910
3911 if (enable == msm_host->pm_qos_group_enable)
3912 goto out;
3913
3914 msm_host->pm_qos_group_enable = enable;
3915 if (!enable) {
3916 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303917 cancel_delayed_work_sync(
3918 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003919 atomic_set(&msm_host->pm_qos[i].counter, 0);
3920 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3921 pm_qos_update_request(&msm_host->pm_qos[i].req,
3922 msm_host->pm_qos[i].latency);
3923 }
3924 }
3925
3926out:
3927 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003928}
3929
3930static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3931{
3932 int i;
3933 struct sdhci_msm_cpu_group_map *map =
3934 &msm_host->pdata->pm_qos_data.cpu_group_map;
3935
3936 if (cpu < 0)
3937 goto not_found;
3938
3939 for (i = 0; i < map->nr_groups; i++)
3940 if (cpumask_test_cpu(cpu, &map->mask[i]))
3941 return i;
3942
3943not_found:
3944 return -EINVAL;
3945}
3946
3947void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3948 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3949{
3950 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3951 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3952 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3953 struct sdhci_msm_pm_qos_group *pm_qos_group;
3954 int counter;
3955
3956 if (!msm_host->pm_qos_group_enable || group < 0)
3957 return;
3958
3959 pm_qos_group = &msm_host->pm_qos[group];
3960 counter = atomic_inc_return(&pm_qos_group->counter);
3961
3962 /* Make sure to update the voting in case power policy has changed */
3963 if (pm_qos_group->latency == latency->latency[host->power_policy]
3964 && counter > 1)
3965 return;
3966
Asutosh Das36c2e922015-12-01 12:19:58 +05303967 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003968
3969 pm_qos_group->latency = latency->latency[host->power_policy];
3970 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3971}
3972
3973static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3974{
3975 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303976 container_of(work, struct sdhci_msm_pm_qos_group,
3977 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003978
3979 if (atomic_read(&group->counter))
3980 return;
3981
3982 group->latency = PM_QOS_DEFAULT_VALUE;
3983 pm_qos_update_request(&group->req, group->latency);
3984}
3985
Gilad Broner07d92eb2015-09-29 16:57:21 +03003986bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003987{
3988 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3989 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3990 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3991
3992 if (!msm_host->pm_qos_group_enable || group < 0 ||
3993 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003994 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003995
3996 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303997 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3998 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003999 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004000 }
4001
4002 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
4003 pm_qos_update_request(&msm_host->pm_qos[group].req,
4004 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03004005 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004006}
4007
/*
 * Set up per-CPU-group PM QoS voting for this host:
 *  - allocate one sdhci_msm_pm_qos_group per DT-described CPU group,
 *  - register a core-affine pm_qos request per group at the default
 *    (no-constraint) latency,
 *  - expose status/enable controls through sysfs.
 *
 * @latency holds the DT latency table per group and power mode; it is only
 * logged here — actual latency values are applied later by
 * sdhci_msm_pm_qos_cpu_vote().  No-op if already enabled; on allocation
 * failure the feature silently stays disabled.
 */
void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
	struct sdhci_msm_pm_qos_group *group;
	int i;
	int ret;

	/* Already initialized and active: nothing to do. */
	if (msm_host->pm_qos_group_enable)
		return;

	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
			GFP_KERNEL);
	if (!msm_host->pm_qos)
		return;

	for (i = 0; i < nr_groups; i++) {
		group = &msm_host->pm_qos[i];
		INIT_DELAYED_WORK(&group->unvote_work,
			sdhci_msm_pm_qos_cpu_unvote_work);
		atomic_set(&group->counter, 0);
		group->req.type = PM_QOS_REQ_AFFINE_CORES;
		cpumask_copy(&group->req.cpus_affine,
			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
		/* We set default latency here for all pm_qos cpu groups. */
		group->latency = PM_QOS_DEFAULT_VALUE;
		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
			group->latency);
		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
			__func__, i,
			group->req.cpus_affine.bits[0],
			group->latency,
			&latency[i].latency[SDHCI_PERFORMANCE_MODE]);
	}
	/* No CPU has voted yet; pre_req tracks the last voting CPU here. */
	msm_host->pm_qos_prev_cpu = -1;
	msm_host->pm_qos_group_enable = true;

	/* sysfs: failures are logged but not fatal to initialization. */
	msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
	msm_host->pm_qos_group_status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
	msm_host->pm_qos_group_status_attr.attr.name =
			"pm_qos_cpu_groups_status";
	msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_group_status_attr);
	if (ret)
		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
			__func__, ret);
	msm_host->pm_qos_group_enable_attr.show =
		sdhci_msm_pm_qos_group_enable_show;
	msm_host->pm_qos_group_enable_attr.store =
		sdhci_msm_pm_qos_group_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
	msm_host->pm_qos_group_enable_attr.attr.name =
			"pm_qos_cpu_groups_enable";
	msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_group_enable_attr);
	if (ret)
		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
			__func__, ret);
}
4073
/*
 * Per-request hook run before an MMC request is issued: takes the IRQ PM
 * QoS vote and a CPU-group vote for the CPU issuing the request.  If the
 * issuing CPU belongs to a different group than the previous request's
 * CPU, the old group's vote is dropped synchronously before voting for
 * the new one; a repeat request from the same group keeps the existing
 * vote (no re-vote).
 */
static void sdhci_msm_pre_req(struct sdhci_host *host,
		struct mmc_request *mmc_req)
{
	int cpu;
	int group;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int prev_group = sdhci_msm_get_cpu_group(msm_host,
			msm_host->pm_qos_prev_cpu);

	sdhci_msm_pm_qos_irq_vote(host);

	/* Sample the current CPU; no need to stay pinned to it afterwards. */
	cpu = get_cpu();
	put_cpu();
	group = sdhci_msm_get_cpu_group(msm_host, cpu);
	if (group < 0)
		return;

	if (group != prev_group && prev_group >= 0) {
		sdhci_msm_pm_qos_cpu_unvote(host,
				msm_host->pm_qos_prev_cpu, false);
		prev_group = -1; /* make sure to vote for new group */
	}

	if (prev_group < 0) {
		sdhci_msm_pm_qos_cpu_vote(host,
				msm_host->pdata->pm_qos_data.latency, cpu);
		msm_host->pm_qos_prev_cpu = cpu;
	}
}
4104
4105static void sdhci_msm_post_req(struct sdhci_host *host,
4106 struct mmc_request *mmc_req)
4107{
4108 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4109 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4110
4111 sdhci_msm_pm_qos_irq_unvote(host, false);
4112
4113 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4114 msm_host->pm_qos_prev_cpu = -1;
4115}
4116
4117static void sdhci_msm_init(struct sdhci_host *host)
4118{
4119 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4120 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4121
4122 sdhci_msm_pm_qos_irq_init(host);
4123
4124 if (msm_host->pdata->pm_qos_data.legacy_valid)
4125 sdhci_msm_pm_qos_cpu_init(host,
4126 msm_host->pdata->pm_qos_data.latency);
4127}
4128
Sahitya Tummala9150a942014-10-31 15:33:04 +05304129static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4130{
4131 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4132 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4133 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4134 u32 max_curr = 0;
4135
4136 if (curr_slot && curr_slot->vdd_data)
4137 max_curr = curr_slot->vdd_data->hpm_uA;
4138
4139 return max_curr;
4140}
4141
Sahitya Tummala073ca552015-08-06 13:59:37 +05304142static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
4143{
4144 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4145 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4146 int ret = 0;
4147 u32 clk_rate = 0;
4148
4149 if (!IS_ERR(msm_host->ice_clk)) {
4150 clk_rate = (state == MMC_LOAD_LOW) ?
4151 msm_host->pdata->ice_clk_min :
4152 msm_host->pdata->ice_clk_max;
4153 if (msm_host->ice_clk_rate == clk_rate)
4154 return 0;
4155 pr_debug("%s: changing ICE clk rate to %u\n",
4156 mmc_hostname(host->mmc), clk_rate);
4157 ret = clk_set_rate(msm_host->ice_clk, clk_rate);
4158 if (ret) {
4159 pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
4160 mmc_hostname(host->mmc), ret, clk_rate);
4161 return ret;
4162 }
4163 msm_host->ice_clk_rate = clk_rate;
4164 }
4165 return 0;
4166}
4167
/*
 * MSM-specific sdhci host operations: crypto (ICE) hooks, clock control,
 * tuning, vendor register dump, PM QoS request hooks (init/pre_req/
 * post_req) and platform quirk handlers, wired into the generic sdhci
 * core via sdhci_msm_pdata.
 */
static struct sdhci_ops sdhci_msm_ops = {
	.crypto_engine_cfg = sdhci_msm_ice_cfg,
	.crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
	.crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
	.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
	.crypto_engine_reset = sdhci_msm_ice_reset,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_msm_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.reset_workaround = sdhci_msm_reset_workaround,
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
	.get_current_limit = sdhci_msm_get_current_limit,
	.notify_load = sdhci_msm_notify_load,
};
4197
/*
 * Read the SDCC core version (major/minor) and derive per-revision
 * capabilities and quirks: voltage/bus-width capability overrides, the
 * one-MID reset workaround, DLL variant selection (CDC LP 533 vs CM DLL,
 * 14lpp, updated reset sequence), RCLK delay fix, 64-bit descriptor
 * masking and ICE-HCI support.  The adjusted capability word is written
 * to the vendor-specific capabilities register and cached in caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	version = sdhci_msm_readl_relaxed(host,
		msm_host_offset->CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49) || (minor >= 0x6b)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		writel_relaxed((readl_relaxed(host->ioaddr +
			SDHCI_CAPABILITIES) | caps), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;

	/* minor >= 0x6b: ICE is integrated per the ICE-HCI spec. */
	if ((major == 1) && (minor >= 0x6b)) {
		msm_host->ice_hci_support = true;
		host->cdr_support = true;
	}
}
4303
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Attach the command-queue (CMDQ) engine to the host, unless disabled via
 * the 'nocmdq' module/cmdline parameter.  On cmdq_pltfm_init() failure the
 * host falls back to non-CMDQ operation (cq_host cleared, failure only
 * logged at debug level); on success MMC_CAP2_CMD_QUEUE is advertised.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (nocmdq) {
		dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
		return;
	}

	host->cq_host = cmdq_pltfm_init(pdev);
	if (IS_ERR(host->cq_host)) {
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
		host->cq_host = NULL;
	} else {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
	}
}
#else
/* CMDQ support compiled out: no-op stub. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{

}
#endif
4332
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004333static bool sdhci_msm_is_bootdevice(struct device *dev)
4334{
4335 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4336 strlen(saved_command_line))) {
4337 char search_string[50];
4338
4339 snprintf(search_string, ARRAY_SIZE(search_string),
4340 "androidboot.bootdevice=%s", dev_name(dev));
4341 if (strnstr(saved_command_line, search_string,
4342 strlen(saved_command_line)))
4343 return true;
4344 else
4345 return false;
4346 }
4347
4348 /*
4349 * "androidboot.bootdevice=" argument is not present then
4350 * return true as we don't know the boot device anyways.
4351 */
4352 return true;
4353}
4354
Asutosh Das0ef24812012-12-18 16:14:02 +05304355static int sdhci_msm_probe(struct platform_device *pdev)
4356{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304357 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304358 struct sdhci_host *host;
4359 struct sdhci_pltfm_host *pltfm_host;
4360 struct sdhci_msm_host *msm_host;
4361 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004362 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004363 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004364 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304365 struct resource *tlmm_memres = NULL;
4366 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304367 unsigned long flags;
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004368 bool force_probe;
Asutosh Das0ef24812012-12-18 16:14:02 +05304369
4370 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4371 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4372 GFP_KERNEL);
4373 if (!msm_host) {
4374 ret = -ENOMEM;
4375 goto out;
4376 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304377
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304378 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4379 msm_host->mci_removed = true;
4380 msm_host->offset = &sdhci_msm_offset_mci_removed;
4381 } else {
4382 msm_host->mci_removed = false;
4383 msm_host->offset = &sdhci_msm_offset_mci_present;
4384 }
4385 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304386 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4387 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4388 if (IS_ERR(host)) {
4389 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304390 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304391 }
4392
4393 pltfm_host = sdhci_priv(host);
4394 pltfm_host->priv = msm_host;
4395 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304396 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304397
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304398 /* get the ice device vops if present */
4399 ret = sdhci_msm_ice_get_dev(host);
4400 if (ret == -EPROBE_DEFER) {
4401 /*
4402 * SDHCI driver might be probed before ICE driver does.
4403 * In that case we would like to return EPROBE_DEFER code
4404 * in order to delay its probing.
4405 */
4406 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4407 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004408 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304409
4410 } else if (ret == -ENODEV) {
4411 /*
4412 * ICE device is not enabled in DTS file. No need for further
4413 * initialization of ICE driver.
4414 */
4415 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4416 __func__);
4417 } else if (ret) {
4418 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4419 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004420 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304421 }
4422
Asutosh Das0ef24812012-12-18 16:14:02 +05304423 /* Extract platform data */
4424 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004425 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304426 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004427 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4428 ret);
4429 goto pltfm_free;
4430 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004431
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004432 /* Read property to determine if the probe is forced */
4433 force_probe = of_find_property(pdev->dev.of_node,
4434 "qcom,force-sdhc1-probe", NULL);
4435
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004436 /* skip the probe if eMMC isn't a boot device */
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004437 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
4438 && !force_probe) {
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004439 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004440 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004441 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004442
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004443 if (disable_slots & (1 << (ret - 1))) {
4444 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4445 ret);
4446 ret = -ENODEV;
4447 goto pltfm_free;
4448 }
4449
Sayali Lokhande5f768322016-04-11 18:36:53 +05304450 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004451 sdhci_slot[ret-1] = msm_host;
4452
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004453 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4454 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304455 if (!msm_host->pdata) {
4456 dev_err(&pdev->dev, "DT parsing error\n");
4457 goto pltfm_free;
4458 }
4459 } else {
4460 dev_err(&pdev->dev, "No device tree node\n");
4461 goto pltfm_free;
4462 }
4463
4464 /* Setup Clocks */
4465
4466 /* Setup SDCC bus voter clock. */
4467 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4468 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4469 /* Vote for max. clk rate for max. performance */
4470 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4471 if (ret)
4472 goto pltfm_free;
4473 ret = clk_prepare_enable(msm_host->bus_clk);
4474 if (ret)
4475 goto pltfm_free;
4476 }
4477
4478 /* Setup main peripheral bus clock */
4479 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4480 if (!IS_ERR(msm_host->pclk)) {
4481 ret = clk_prepare_enable(msm_host->pclk);
4482 if (ret)
4483 goto bus_clk_disable;
4484 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304485 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304486
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304487 /* Setup SDC ufs bus aggr clock */
4488 msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
4489 if (!IS_ERR(msm_host->bus_aggr_clk)) {
4490 ret = clk_prepare_enable(msm_host->bus_aggr_clk);
4491 if (ret) {
4492 dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
4493 goto pclk_disable;
4494 }
4495 }
4496
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304497 if (msm_host->ice.pdev) {
4498 /* Setup SDC ICE clock */
4499 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4500 if (!IS_ERR(msm_host->ice_clk)) {
4501 /* ICE core has only one clock frequency for now */
4502 ret = clk_set_rate(msm_host->ice_clk,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304503 msm_host->pdata->ice_clk_max);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304504 if (ret) {
4505 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4506 ret,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304507 msm_host->pdata->ice_clk_max);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304508 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304509 }
4510 ret = clk_prepare_enable(msm_host->ice_clk);
4511 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304512 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304513
4514 msm_host->ice_clk_rate =
Sahitya Tummala073ca552015-08-06 13:59:37 +05304515 msm_host->pdata->ice_clk_max;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304516 }
4517 }
4518
Asutosh Das0ef24812012-12-18 16:14:02 +05304519 /* Setup SDC MMC clock */
4520 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4521 if (IS_ERR(msm_host->clk)) {
4522 ret = PTR_ERR(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304523 goto bus_aggr_clk_disable;
Asutosh Das0ef24812012-12-18 16:14:02 +05304524 }
4525
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304526 /* Set to the minimum supported clock frequency */
4527 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4528 if (ret) {
4529 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304530 goto bus_aggr_clk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304531 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304532 ret = clk_prepare_enable(msm_host->clk);
4533 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304534 goto bus_aggr_clk_disable;
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304535
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304536 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304537 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304538
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004539 /* Setup CDC calibration fixed feedback clock */
4540 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4541 if (!IS_ERR(msm_host->ff_clk)) {
4542 ret = clk_prepare_enable(msm_host->ff_clk);
4543 if (ret)
4544 goto clk_disable;
4545 }
4546
4547 /* Setup CDC calibration sleep clock */
4548 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4549 if (!IS_ERR(msm_host->sleep_clk)) {
4550 ret = clk_prepare_enable(msm_host->sleep_clk);
4551 if (ret)
4552 goto ff_clk_disable;
4553 }
4554
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004555 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4556
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304557 ret = sdhci_msm_bus_register(msm_host, pdev);
4558 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004559 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304560
4561 if (msm_host->msm_bus_vote.client_handle)
4562 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4563 sdhci_msm_bus_work);
4564 sdhci_msm_bus_voting(host, 1);
4565
Asutosh Das0ef24812012-12-18 16:14:02 +05304566 /* Setup regulators */
4567 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4568 if (ret) {
4569 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304570 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304571 }
4572
4573 /* Reset the core and Enable SDHC mode */
4574 core_memres = platform_get_resource_byname(pdev,
4575 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304576 if (!msm_host->mci_removed) {
4577 if (!core_memres) {
4578 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4579 goto vreg_deinit;
4580 }
4581 msm_host->core_mem = devm_ioremap(&pdev->dev,
4582 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304583
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304584 if (!msm_host->core_mem) {
4585 dev_err(&pdev->dev, "Failed to remap registers\n");
4586 ret = -ENOMEM;
4587 goto vreg_deinit;
4588 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304589 }
4590
Sahitya Tummala079ed852015-10-29 20:18:45 +05304591 tlmm_memres = platform_get_resource_byname(pdev,
4592 IORESOURCE_MEM, "tlmm_mem");
4593 if (tlmm_memres) {
4594 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4595 resource_size(tlmm_memres));
4596
4597 if (!tlmm_mem) {
4598 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4599 ret = -ENOMEM;
4600 goto vreg_deinit;
4601 }
4602 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4603 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4604 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4605 }
4606
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304607 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004608 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304609 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004610 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304611 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304612
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +05304613 /*
4614 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
4615 */
4616 writel_relaxed((readl_relaxed(host->ioaddr +
4617 msm_host_offset->CORE_VENDOR_SPEC3) &
4618 ~CORE_FIFO_ALT_EN), host->ioaddr +
4619 msm_host_offset->CORE_VENDOR_SPEC3);
4620
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304621 if (!msm_host->mci_removed) {
4622 /* Set HC_MODE_EN bit in HC_MODE register */
4623 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304624
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304625 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4626 writel_relaxed(readl_relaxed(msm_host->core_mem +
4627 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4628 msm_host->core_mem + CORE_HC_MODE);
4629 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304630 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004631
4632 /*
4633 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
4634 * be used as required later on.
4635 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304636 writel_relaxed((readl_relaxed(host->ioaddr +
4637 msm_host_offset->CORE_VENDOR_SPEC) |
4638 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4639 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304640 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304641 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4642 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4643 * interrupt in GIC (by registering the interrupt handler), we need to
4644 * ensure that any pending power irq interrupt status is acknowledged
4645 * otherwise power irq interrupt handler would be fired prematurely.
4646 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304647 irq_status = sdhci_msm_readl_relaxed(host,
4648 msm_host_offset->CORE_PWRCTL_STATUS);
4649 sdhci_msm_writel_relaxed(irq_status, host,
4650 msm_host_offset->CORE_PWRCTL_CLEAR);
4651 irq_ctl = sdhci_msm_readl_relaxed(host,
4652 msm_host_offset->CORE_PWRCTL_CTL);
4653
Subhash Jadavani28137342013-05-14 17:46:43 +05304654 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4655 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4656 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4657 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304658 sdhci_msm_writel_relaxed(irq_ctl, host,
4659 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004660
Subhash Jadavani28137342013-05-14 17:46:43 +05304661 /*
4662 * Ensure that above writes are propogated before interrupt enablement
4663 * in GIC.
4664 */
4665 mb();
4666
4667 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304668 * Following are the deviations from SDHC spec v3.0 -
4669 * 1. Card detection is handled using separate GPIO.
4670 * 2. Bus power control is handled by interacting with PMIC.
4671 */
4672 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4673 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304674 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004675 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304676 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304677 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304678 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304679 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304680 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304681 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304682
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304683 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4684 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4685
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004686 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004687 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4688 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4689 SDHCI_VENDOR_VER_SHIFT));
4690 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4691 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4692 /*
4693 * Add 40us delay in interrupt handler when
4694 * operating at initialization frequency(400KHz).
4695 */
4696 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4697 /*
4698 * Set Software Reset for DAT line in Software
4699 * Reset Register (Bit 2).
4700 */
4701 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4702 }
4703
Asutosh Das214b9662013-06-13 14:27:42 +05304704 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4705
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004706 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004707 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4708 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304709 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004710 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304711 goto vreg_deinit;
4712 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004713 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304714 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004715 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304716 if (ret) {
4717 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004718 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304719 goto vreg_deinit;
4720 }
4721
4722 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304723 sdhci_msm_writel_relaxed(INT_MASK, host,
4724 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304725
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304726#ifdef CONFIG_MMC_CLKGATE
4727 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4728 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4729#endif
4730
Asutosh Das0ef24812012-12-18 16:14:02 +05304731 /* Set host capabilities */
4732 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4733 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004734 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304735 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304736 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004737 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004738 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004739 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304740 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004741 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004742 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304743 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304744
4745 if (msm_host->pdata->nonremovable)
4746 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4747
Guoping Yuf7c91332014-08-20 16:56:18 +08004748 if (msm_host->pdata->nonhotplug)
4749 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4750
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07004751 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
4752
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304753 /* Initialize ICE if present */
4754 if (msm_host->ice.pdev) {
4755 ret = sdhci_msm_ice_init(host);
4756 if (ret) {
4757 dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
4758 mmc_hostname(host->mmc), ret);
4759 ret = -EINVAL;
4760 goto vreg_deinit;
4761 }
4762 host->is_crypto_en = true;
4763 /* Packed commands cannot be encrypted/decrypted using ICE */
4764 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
4765 MMC_CAP2_PACKED_WR_CONTROL);
4766 }
4767
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304768 init_completion(&msm_host->pwr_irq_completion);
4769
Sahitya Tummala581df132013-03-12 14:57:46 +05304770 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304771 /*
4772 * Set up the card detect GPIO in active configuration before
4773 * configuring it as an IRQ. Otherwise, it can be in some
4774 * weird/inconsistent state resulting in flood of interrupts.
4775 */
4776 sdhci_msm_setup_pins(msm_host->pdata, true);
4777
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304778 /*
4779 * This delay is needed for stabilizing the card detect GPIO
4780 * line after changing the pull configs.
4781 */
4782 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304783 ret = mmc_gpio_request_cd(msm_host->mmc,
4784 msm_host->pdata->status_gpio, 0);
4785 if (ret) {
4786 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4787 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304788 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304789 }
4790 }
4791
Krishna Konda7feab352013-09-17 23:55:40 -07004792 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4793 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4794 host->dma_mask = DMA_BIT_MASK(64);
4795 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304796 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004797 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304798 host->dma_mask = DMA_BIT_MASK(32);
4799 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304800 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304801 } else {
4802 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4803 }
4804
Ritesh Harjani42876f42015-11-17 17:46:51 +05304805 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4806 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304807 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304808 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4809 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304810 msm_host->is_sdiowakeup_enabled = true;
4811 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4812 sdhci_msm_sdiowakeup_irq,
4813 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4814 "sdhci-msm sdiowakeup", host);
4815 if (ret) {
4816 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4817 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4818 msm_host->pdata->sdiowakeup_irq = -1;
4819 msm_host->is_sdiowakeup_enabled = false;
4820 goto vreg_deinit;
4821 } else {
4822 spin_lock_irqsave(&host->lock, flags);
4823 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304824 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304825 spin_unlock_irqrestore(&host->lock, flags);
4826 }
4827 }
4828
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004829 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304830 ret = sdhci_add_host(host);
4831 if (ret) {
4832 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304833 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304834 }
4835
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05304836 msm_host->pltfm_init_done = true;
4837
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004838 pm_runtime_set_active(&pdev->dev);
4839 pm_runtime_enable(&pdev->dev);
4840 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4841 pm_runtime_use_autosuspend(&pdev->dev);
4842
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304843 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4844 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4845 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4846 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4847 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4848 ret = device_create_file(&pdev->dev,
4849 &msm_host->msm_bus_vote.max_bus_bw);
4850 if (ret)
4851 goto remove_host;
4852
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304853 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4854 msm_host->polling.show = show_polling;
4855 msm_host->polling.store = store_polling;
4856 sysfs_attr_init(&msm_host->polling.attr);
4857 msm_host->polling.attr.name = "polling";
4858 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4859 ret = device_create_file(&pdev->dev, &msm_host->polling);
4860 if (ret)
4861 goto remove_max_bus_bw_file;
4862 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304863
4864 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4865 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4866 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4867 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4868 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4869 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4870 if (ret) {
4871 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4872 mmc_hostname(host->mmc), __func__, ret);
4873 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4874 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304875 /* Successful initialization */
4876 goto out;
4877
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304878remove_max_bus_bw_file:
4879 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304880remove_host:
4881 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004882 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304883 sdhci_remove_host(host, dead);
4884vreg_deinit:
4885 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304886bus_unregister:
4887 if (msm_host->msm_bus_vote.client_handle)
4888 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4889 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004890sleep_clk_disable:
4891 if (!IS_ERR(msm_host->sleep_clk))
4892 clk_disable_unprepare(msm_host->sleep_clk);
4893ff_clk_disable:
4894 if (!IS_ERR(msm_host->ff_clk))
4895 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304896clk_disable:
4897 if (!IS_ERR(msm_host->clk))
4898 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304899bus_aggr_clk_disable:
4900 if (!IS_ERR(msm_host->bus_aggr_clk))
4901 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304902pclk_disable:
4903 if (!IS_ERR(msm_host->pclk))
4904 clk_disable_unprepare(msm_host->pclk);
4905bus_clk_disable:
4906 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4907 clk_disable_unprepare(msm_host->bus_clk);
4908pltfm_free:
4909 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304910out_host_free:
4911 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304912out:
4913 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4914 return ret;
4915}
4916
/*
 * sdhci_msm_remove() - undo everything sdhci_msm_probe() set up.
 *
 * Teardown runs roughly in reverse probe order: sysfs attributes,
 * runtime PM, the SDHCI host itself, platform glue, regulators,
 * pinctrl state and the bus-bandwidth vote.  Always returns 0.
 */
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
	/* All-ones from a register read means the controller is unreachable */
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
			0xffffffff);

	pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
	/* The "polling" attribute only exists when there is no CD GPIO */
	if (!gpio_is_valid(msm_host->pdata->status_gpio))
		device_remove_file(&pdev->dev, &msm_host->polling);
	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
	pm_runtime_disable(&pdev->dev);
	sdhci_remove_host(host, dead);
	sdhci_pltfm_free(pdev);

	/* 'false' tears down the regulators acquired during probe */
	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);

	/*
	 * NOTE(review): pins are configured active ('true') and then
	 * immediately parked ('false'); presumably this forces a known
	 * state before sleep config — confirm against
	 * sdhci_msm_setup_pins().
	 */
	sdhci_msm_setup_pins(pdata, true);
	sdhci_msm_setup_pins(pdata, false);

	if (msm_host->msm_bus_vote.client_handle) {
		/* Drop any queued/pending bandwidth vote before unregister */
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		sdhci_msm_bus_unregister(msm_host);
	}
	return 0;
}
4945
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004946#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304947static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4948{
4949 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4950 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4951 unsigned long flags;
4952 int ret = 0;
4953
4954 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4955 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4956 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304957 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304958 return 1;
4959 }
4960
4961 spin_lock_irqsave(&host->lock, flags);
4962 if (enable) {
4963 /* configure DAT1 gpio if applicable */
4964 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304965 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304966 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4967 if (!ret)
4968 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4969 goto out;
4970 } else {
4971 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4972 mmc_hostname(host->mmc), enable);
4973 }
4974 } else {
4975 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4976 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4977 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304978 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304979 } else {
4980 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
4981 mmc_hostname(host->mmc), enable);
4982
4983 }
4984 }
4985out:
4986 if (ret)
4987 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4988 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4989 ret, msm_host->pdata->sdiowakeup_irq);
4990 spin_unlock_irqrestore(&host->lock, flags);
4991 return ret;
4992}
4993
4994
/*
 * sdhci_msm_runtime_suspend() - runtime-suspend handler.
 *
 * Disables the host irq (kept enabled for SDIO cards so card
 * interrupts can still be delivered), disables the power irq, drops
 * the bus-bandwidth vote when clocks are already off, suspends the
 * inline crypto engine (ICE) when in use, and traces the time spent.
 * Always returns 0.
 */
static int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	ktime_t start = ktime_get();
	int ret;

	/* SDIO cards keep the host irq enabled across runtime suspend */
	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		goto defer_disable_host_irq;

	sdhci_cfg_irq(host, false, true);

defer_disable_host_irq:
	disable_irq(msm_host->pwr_irq);

	/*
	 * Remove the vote immediately only if clocks are off in which
	 * case we might have queued work to remove vote but it may not
	 * be completed before runtime suspend or system suspend.
	 */
	if (!atomic_read(&msm_host->clks_on)) {
		if (msm_host->msm_bus_vote.client_handle)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
	}

	/* ICE suspend failure is logged but does not fail the suspend */
	if (host->is_crypto_en) {
		ret = sdhci_msm_ice_suspend(host);
		if (ret < 0)
			pr_err("%s: failed to suspend crypto engine %d\n",
					mmc_hostname(host->mmc), ret);
	}
	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}
5031
/*
 * sdhci_msm_runtime_resume() - runtime-resume handler.
 *
 * Resumes the inline crypto engine (after turning its controller
 * clock back on), re-enables the host irq (skipped for SDIO cards,
 * for which it was never disabled) and the power irq, and traces the
 * time spent.  Always returns 0; ICE failures are only logged.
 */
static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	ktime_t start = ktime_get();
	int ret;

	if (host->is_crypto_en) {
		/* ICE registers need the controller clock before resume */
		ret = sdhci_msm_enable_controller_clock(host);
		if (ret) {
			pr_err("%s: Failed to enable reqd clocks\n",
					mmc_hostname(host->mmc));
			goto skip_ice_resume;
		}
		ret = sdhci_msm_ice_resume(host);
		if (ret)
			pr_err("%s: failed to resume crypto engine %d\n",
					mmc_hostname(host->mmc), ret);
	}
skip_ice_resume:

	/* Host irq stays enabled across runtime PM for SDIO cards */
	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		goto defer_enable_host_irq;

	sdhci_cfg_irq(host, true, true);

defer_enable_host_irq:
	enable_irq(msm_host->pwr_irq);

	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}
5066
/*
 * sdhci_msm_suspend() - late system-suspend handler.
 *
 * Masks the card-detect irq, runs the runtime-suspend path unless the
 * device is already runtime suspended, gates the controller clock,
 * and for SDIO cards arms the DAT1 GPIO wakeup — falling back to
 * disabling the host irq when the wakeup irq is not applicable.
 */
static int sdhci_msm_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	/* Quiesce card-detect while the system sleeps */
	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
			(msm_host->mmc->slot.cd_irq >= 0))
		disable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: already runtime suspended\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}
	ret = sdhci_msm_runtime_suspend(dev);
out:
	/* Controller clock is gated unconditionally on the way down */
	sdhci_msm_disable_controller_clock(host);
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		/* Non-zero: wakeup irq not usable, mask the host irq instead */
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
		if (sdio_cfg)
			sdhci_cfg_irq(host, false, true);
	}

	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}
5098
/*
 * sdhci_msm_resume() - late system-resume handler.
 *
 * Mirror of sdhci_msm_suspend(): re-enables the card-detect irq, runs
 * the runtime-resume path unless the device is runtime suspended (in
 * which case resume is deferred to runtime PM), and for SDIO cards
 * disarms the DAT1 GPIO wakeup — re-enabling the host irq when the
 * wakeup irq was not applicable.
 */
static int sdhci_msm_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
			(msm_host->mmc->slot.cd_irq >= 0))
		enable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: runtime suspended, defer system resume\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = sdhci_msm_runtime_resume(dev);
out:
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		/* Non-zero: wakeup irq was not armed, unmask host irq again */
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
		if (sdio_cfg)
			sdhci_cfg_irq(host, true, true);
	}

	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}
5130
Ritesh Harjani42876f42015-11-17 17:46:51 +05305131static int sdhci_msm_suspend_noirq(struct device *dev)
5132{
5133 struct sdhci_host *host = dev_get_drvdata(dev);
5134 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5135 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5136 int ret = 0;
5137
5138 /*
5139 * ksdioirqd may be running, hence retry
5140 * suspend in case the clocks are ON
5141 */
5142 if (atomic_read(&msm_host->clks_on)) {
5143 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
5144 mmc_hostname(host->mmc), __func__);
5145 ret = -EAGAIN;
5146 }
5147
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305148 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5149 if (msm_host->sdio_pending_processing)
5150 ret = -EBUSY;
5151
Ritesh Harjani42876f42015-11-17 17:46:51 +05305152 return ret;
5153}
5154
/*
 * PM callbacks: late system-sleep hooks plus runtime PM.  The
 * suspend_noirq hook lets the driver veto suspend (-EAGAIN/-EBUSY)
 * when clocks are still on or SDIO wakeup processing is pending.
 */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
	.suspend_noirq = sdhci_msm_suspend_noirq,
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
/* !CONFIG_PM: register the driver without PM callbacks */
#define SDHCI_MSM_PMOPS NULL
#endif
/* Device-tree compatibles handled by this driver (base and v5 cores) */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{.compatible = "qcom,sdhci-msm-v5"},
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
5173
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		/* Allow probing in parallel with other drivers */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_msm_dt_match,
		/* NULL when CONFIG_PM is not set */
		.pm = SDHCI_MSM_PMOPS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");