blob: d7409a8095801aa90982bd5f2c062851aa7c28f2 [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Sayali Lokhandebff771e2016-11-30 11:35:22 +05305 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +053024#include <linux/of_device.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053025#include <linux/of_gpio.h>
26#include <linux/regulator/consumer.h>
27#include <linux/types.h>
28#include <linux/input.h>
29#include <linux/platform_device.h>
30#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070031#include <linux/io.h>
32#include <linux/delay.h>
33#include <linux/scatterlist.h>
34#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053035#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053036#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053037#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053038#include <linux/pinctrl/consumer.h>
39#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053040#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020041#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020042#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053043
Sahitya Tummala56874732015-05-21 08:24:03 +053044#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070045#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053046
Asutosh Das36c2e922015-12-01 12:19:58 +053047#define QOS_REMOVE_DELAY_MS 10
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080048#define CORE_POWER 0x0
49#define CORE_SW_RST (1 << 7)
50
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070051#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080052
53#define CORE_VERSION_STEP_MASK 0x0000FFFF
54#define CORE_VERSION_MINOR_MASK 0x0FFF0000
55#define CORE_VERSION_MINOR_SHIFT 16
56#define CORE_VERSION_MAJOR_MASK 0xF0000000
57#define CORE_VERSION_MAJOR_SHIFT 28
58#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030059#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080060
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080061#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053062
63#define CORE_VERSION_MAJOR_MASK 0xF0000000
64#define CORE_VERSION_MAJOR_SHIFT 28
65
Asutosh Das0ef24812012-12-18 16:14:02 +053066#define CORE_HC_MODE 0x78
67#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070068#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053069
Asutosh Das0ef24812012-12-18 16:14:02 +053070#define CORE_PWRCTL_BUS_OFF 0x01
71#define CORE_PWRCTL_BUS_ON (1 << 1)
72#define CORE_PWRCTL_IO_LOW (1 << 2)
73#define CORE_PWRCTL_IO_HIGH (1 << 3)
74
75#define CORE_PWRCTL_BUS_SUCCESS 0x01
76#define CORE_PWRCTL_BUS_FAIL (1 << 1)
77#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
78#define CORE_PWRCTL_IO_FAIL (1 << 3)
79
80#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070081#define MAX_PHASES 16
82
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070083#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070084#define CORE_DLL_EN (1 << 16)
85#define CORE_CDR_EN (1 << 17)
86#define CORE_CK_OUT_EN (1 << 18)
87#define CORE_CDR_EXT_EN (1 << 19)
88#define CORE_DLL_PDN (1 << 29)
89#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070090
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070091#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -070092#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070093
Krishna Konda46fd1432014-10-30 21:13:27 -070094#define CORE_CLK_PWRSAVE (1 << 1)
95#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
96#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
97#define CORE_HC_MCLK_SEL_MASK (3 << 8)
98#define CORE_HC_AUTO_CMD21_EN (1 << 6)
99#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700100#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700101#define CORE_HC_SELECT_IN_EN (1 << 18)
102#define CORE_HC_SELECT_IN_HS400 (6 << 19)
103#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700104#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700105
Pavan Anamula691dd592015-08-25 16:11:20 +0530106#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
107#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530108#define CORE_ONE_MID_EN (1 << 25)
109
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530110#define CORE_8_BIT_SUPPORT (1 << 18)
111#define CORE_3_3V_SUPPORT (1 << 24)
112#define CORE_3_0V_SUPPORT (1 << 25)
113#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300114#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700115
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700116#define CORE_CSR_CDC_CTLR_CFG0 0x130
117#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
118#define CORE_HW_AUTOCAL_ENA (1 << 17)
119
120#define CORE_CSR_CDC_CTLR_CFG1 0x134
121#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
122#define CORE_TIMER_ENA (1 << 16)
123
124#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
125#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
126#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
127#define CORE_CDC_OFFSET_CFG 0x14C
128#define CORE_CSR_CDC_DELAY_CFG 0x150
129#define CORE_CDC_SLAVE_DDA_CFG 0x160
130#define CORE_CSR_CDC_STATUS0 0x164
131#define CORE_CALIBRATION_DONE (1 << 0)
132
133#define CORE_CDC_ERROR_CODE_MASK 0x7000000
134
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300135#define CQ_CMD_DBG_RAM 0x110
136#define CQ_CMD_DBG_RAM_WA 0x150
137#define CQ_CMD_DBG_RAM_OL 0x154
138
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700139#define CORE_CSR_CDC_GEN_CFG 0x178
140#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
141#define CORE_CDC_SWITCH_RC_EN (1 << 1)
142
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700143#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530144#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700145#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530146
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700147#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530148#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700149
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700150#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800151#define CORE_FLL_CYCLE_CNT (1 << 18)
152#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700153
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530154#define DDR_CONFIG_POR_VAL 0x80040853
155#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
156#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700157#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700158
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700159/* 512 descriptors */
160#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530161#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530162
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700163#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800164#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700165
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700166#define INVALID_TUNING_PHASE -1
Ritesh Harjani42876f42015-11-17 17:46:51 +0530167#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700168
Krishna Konda96e6b112013-10-28 15:25:03 -0700169#define NUM_TUNING_PHASES 16
Talel Shenhar6f0f3412015-11-08 14:21:31 +0200170#define MAX_DRV_TYPES_SUPPORTED_HS200 4
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200171#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700172
/*
 * Per-SoC-variant register offset map.
 *
 * The same logical vendor registers live at different offsets depending on
 * whether the legacy MCI register space is present or has been removed
 * (see the two instances below and the mci_removed checks in the
 * read/write helpers).  All fields are byte offsets from the relevant
 * base, except CORE_TESTBUS_SEL2_BIT/ENA/SEL2 which encode bit positions
 * and masks for the testbus config register.
 */
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;	/* bit position, not an offset */
	u32 CORE_TESTBUS_ENA;		/* mask, not an offset */
	u32 CORE_TESTBUS_SEL2;		/* mask, not an offset */
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};
201
/*
 * Offsets used when the MCI register space is absent (msm_host->mci_removed
 * set): every register is accessed through host->ioaddr (see the
 * sdhci_msm_*_relaxed helpers below).
 */
struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};
230
/*
 * Offsets used when the MCI register space is still present: the helpers
 * below access these registers relative to msm_host->core_mem instead of
 * host->ioaddr.
 */
struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};
259
260u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
261{
262 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
263 struct sdhci_msm_host *msm_host = pltfm_host->priv;
264 void __iomem *base_addr;
265
266 if (msm_host->mci_removed)
267 base_addr = host->ioaddr;
268 else
269 base_addr = msm_host->core_mem;
270
271 return readb_relaxed(base_addr + offset);
272}
273
274u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
275{
276 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
277 struct sdhci_msm_host *msm_host = pltfm_host->priv;
278 void __iomem *base_addr;
279
280 if (msm_host->mci_removed)
281 base_addr = host->ioaddr;
282 else
283 base_addr = msm_host->core_mem;
284
285 return readl_relaxed(base_addr + offset);
286}
287
288void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
289{
290 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
291 struct sdhci_msm_host *msm_host = pltfm_host->priv;
292 void __iomem *base_addr;
293
294 if (msm_host->mci_removed)
295 base_addr = host->ioaddr;
296 else
297 base_addr = msm_host->core_mem;
298
299 writeb_relaxed(val, base_addr + offset);
300}
301
302void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
303{
304 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
305 struct sdhci_msm_host *msm_host = pltfm_host->priv;
306 void __iomem *base_addr;
307
308 if (msm_host->mci_removed)
309 base_addr = host->ioaddr;
310 else
311 base_addr = msm_host->core_mem;
312
313 writel_relaxed(val, base_addr + offset);
314}
315
Ritesh Harjani82124772014-11-04 15:34:00 +0530316/* Timeout value to avoid infinite waiting for pwr_irq */
317#define MSM_PWR_IRQ_TIMEOUT_MS 5000
318
/*
 * Tuning block patterns sent during CMD19/CMD21 tuning.  tuning_block_64 is
 * the 64-byte pattern (4-bit bus), tuning_block_128 the 128-byte pattern
 * (8-bit bus) — presumably the standard patterns from the SD/eMMC specs;
 * confirm against the spec before modifying.
 */
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};
Asutosh Das0ef24812012-12-18 16:14:02 +0530336
/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

/* Bitmask-style knob to disable individual slots at probe time. */
static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

/* Debug knob; name suggests it disables CMDQ support — confirm at usage. */
static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
346
/* Selector for which I/O pad voltage to program in sdhci_msm_set_vdd_io_vol(). */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};
358
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700359/* MSM platform specific tuning */
360static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
361 u8 poll)
362{
363 int rc = 0;
364 u32 wait_cnt = 50;
365 u8 ck_out_en = 0;
366 struct mmc_host *mmc = host->mmc;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530367 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
368 struct sdhci_msm_host *msm_host = pltfm_host->priv;
369 const struct sdhci_msm_offset *msm_host_offset =
370 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700371
372 /* poll for CK_OUT_EN bit. max. poll time = 50us */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530373 ck_out_en = !!(readl_relaxed(host->ioaddr +
374 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700375
376 while (ck_out_en != poll) {
377 if (--wait_cnt == 0) {
378 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
379 mmc_hostname(mmc), __func__, poll);
380 rc = -ETIMEDOUT;
381 goto out;
382 }
383 udelay(1);
384
385 ck_out_en = !!(readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530386 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700387 }
388out:
389 return rc;
390}
391
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530392/*
393 * Enable CDR to track changes of DAT lines and adjust sampling
394 * point according to voltage/temperature variations
395 */
396static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
397{
398 int rc = 0;
399 u32 config;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530400 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
401 struct sdhci_msm_host *msm_host = pltfm_host->priv;
402 const struct sdhci_msm_offset *msm_host_offset =
403 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530404
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530405 config = readl_relaxed(host->ioaddr +
406 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530407 config |= CORE_CDR_EN;
408 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530409 writel_relaxed(config, host->ioaddr +
410 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530411
412 rc = msm_dll_poll_ck_out_en(host, 0);
413 if (rc)
414 goto err;
415
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530416 writel_relaxed((readl_relaxed(host->ioaddr +
417 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
418 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530419
420 rc = msm_dll_poll_ck_out_en(host, 1);
421 if (rc)
422 goto err;
423 goto out;
424err:
425 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
426out:
427 return rc;
428}
429
430static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
431 *attr, const char *buf, size_t count)
432{
433 struct sdhci_host *host = dev_get_drvdata(dev);
434 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
435 struct sdhci_msm_host *msm_host = pltfm_host->priv;
436 u32 tmp;
437 unsigned long flags;
438
439 if (!kstrtou32(buf, 0, &tmp)) {
440 spin_lock_irqsave(&host->lock, flags);
441 msm_host->en_auto_cmd21 = !!tmp;
442 spin_unlock_irqrestore(&host->lock, flags);
443 }
444 return count;
445}
446
447static ssize_t show_auto_cmd21(struct device *dev,
448 struct device_attribute *attr, char *buf)
449{
450 struct sdhci_host *host = dev_get_drvdata(dev);
451 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
452 struct sdhci_msm_host *msm_host = pltfm_host->priv;
453
454 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
455}
456
457/* MSM auto-tuning handler */
458static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
459 bool enable,
460 u32 type)
461{
462 int rc = 0;
463 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
464 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530465 const struct sdhci_msm_offset *msm_host_offset =
466 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530467 u32 val = 0;
468
469 if (!msm_host->en_auto_cmd21)
470 return 0;
471
472 if (type == MMC_SEND_TUNING_BLOCK_HS200)
473 val = CORE_HC_AUTO_CMD21_EN;
474 else
475 return 0;
476
477 if (enable) {
478 rc = msm_enable_cdr_cm_sdc4_dll(host);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530479 writel_relaxed(readl_relaxed(host->ioaddr +
480 msm_host_offset->CORE_VENDOR_SPEC) | val,
481 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530482 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530483 writel_relaxed(readl_relaxed(host->ioaddr +
484 msm_host_offset->CORE_VENDOR_SPEC) & ~val,
485 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530486 }
487 return rc;
488}
489
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700490static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
491{
492 int rc = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530493 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
494 struct sdhci_msm_host *msm_host = pltfm_host->priv;
495 const struct sdhci_msm_offset *msm_host_offset =
496 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700497 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
498 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
499 0x8};
500 unsigned long flags;
501 u32 config;
502 struct mmc_host *mmc = host->mmc;
503
504 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
505 spin_lock_irqsave(&host->lock, flags);
506
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530507 config = readl_relaxed(host->ioaddr +
508 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700509 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
510 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530511 writel_relaxed(config, host->ioaddr +
512 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700513
514 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
515 rc = msm_dll_poll_ck_out_en(host, 0);
516 if (rc)
517 goto err_out;
518
519 /*
520 * Write the selected DLL clock output phase (0 ... 15)
521 * to CDR_SELEXT bit field of DLL_CONFIG register.
522 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530523 writel_relaxed(((readl_relaxed(host->ioaddr +
524 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700525 & ~(0xF << 20))
526 | (grey_coded_phase_table[phase] << 20)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530527 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700528
529 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530530 writel_relaxed((readl_relaxed(host->ioaddr +
531 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
532 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700533
534 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
535 rc = msm_dll_poll_ck_out_en(host, 1);
536 if (rc)
537 goto err_out;
538
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530539 config = readl_relaxed(host->ioaddr +
540 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700541 config |= CORE_CDR_EN;
542 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530543 writel_relaxed(config, host->ioaddr +
544 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700545 goto out;
546
547err_out:
548 pr_err("%s: %s: Failed to set DLL phase: %d\n",
549 mmc_hostname(mmc), __func__, phase);
550out:
551 spin_unlock_irqrestore(&host->lock, flags);
552 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
553 return rc;
554}
555
/*
 * Find out the greatest range of consecuitive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 *
 * @phase_table: sorted list of phases (0..15) that passed tuning.
 * @total_phases: number of entries in @phase_table (1..MAX_PHASES).
 *
 * Algorithm: partition @phase_table into rows of consecutive phases
 * ("windows"); if one window ends at phase 15 and another starts at
 * phase 0, merge them (the phase space is cyclic); then pick the
 * largest window and return the phase 3/4 of the way into it.
 *
 * Returns the selected phase (0..15) or -EINVAL on bad input or if the
 * windows are inconsistent.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[row][col]: each row is one window of consecutive phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split the phase list into rows of consecutive values. */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the row (window) holding the most phases. */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Take the phase 3/4 of the way into the largest window. */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
669
670static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
671{
672 u32 mclk_freq = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530673 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
674 struct sdhci_msm_host *msm_host = pltfm_host->priv;
675 const struct sdhci_msm_offset *msm_host_offset =
676 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700677
678 /* Program the MCLK value to MCLK_FREQ bit field */
679 if (host->clock <= 112000000)
680 mclk_freq = 0;
681 else if (host->clock <= 125000000)
682 mclk_freq = 1;
683 else if (host->clock <= 137000000)
684 mclk_freq = 2;
685 else if (host->clock <= 150000000)
686 mclk_freq = 3;
687 else if (host->clock <= 162000000)
688 mclk_freq = 4;
689 else if (host->clock <= 175000000)
690 mclk_freq = 5;
691 else if (host->clock <= 187000000)
692 mclk_freq = 6;
693 else if (host->clock <= 200000000)
694 mclk_freq = 7;
695
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530696 writel_relaxed(((readl_relaxed(host->ioaddr +
697 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700698 & ~(7 << 24)) | (mclk_freq << 24)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530699 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700700}
701
702/* Initialize the DLL (Programmable Delay Line ) */
703static int msm_init_cm_dll(struct sdhci_host *host)
704{
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800705 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
706 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530707 const struct sdhci_msm_offset *msm_host_offset =
708 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700709 struct mmc_host *mmc = host->mmc;
710 int rc = 0;
711 unsigned long flags;
712 u32 wait_cnt;
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530713 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700714
715 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
716 spin_lock_irqsave(&host->lock, flags);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530717 prev_pwrsave = !!(readl_relaxed(host->ioaddr +
718 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530719 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700720 /*
721 * Make sure that clock is always enabled when DLL
722 * tuning is in progress. Keeping PWRSAVE ON may
723 * turn off the clock. So let's disable the PWRSAVE
724 * here and re-enable it once tuning is completed.
725 */
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530726 if (prev_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530727 writel_relaxed((readl_relaxed(host->ioaddr +
728 msm_host_offset->CORE_VENDOR_SPEC)
729 & ~CORE_CLK_PWRSAVE), host->ioaddr +
730 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530731 curr_pwrsave = false;
732 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700733
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800734 if (msm_host->use_updated_dll_reset) {
735 /* Disable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530736 writel_relaxed((readl_relaxed(host->ioaddr +
737 msm_host_offset->CORE_DLL_CONFIG)
738 & ~CORE_CK_OUT_EN), host->ioaddr +
739 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800740
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530741 writel_relaxed((readl_relaxed(host->ioaddr +
742 msm_host_offset->CORE_DLL_CONFIG_2)
743 | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
744 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800745 }
746
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700747 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530748 writel_relaxed((readl_relaxed(host->ioaddr +
749 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
750 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700751
752 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530753 writel_relaxed((readl_relaxed(host->ioaddr +
754 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
755 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700756 msm_cm_dll_set_freq(host);
757
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800758 if (msm_host->use_updated_dll_reset) {
759 u32 mclk_freq = 0;
760
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530761 if ((readl_relaxed(host->ioaddr +
762 msm_host_offset->CORE_DLL_CONFIG_2)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800763 & CORE_FLL_CYCLE_CNT))
764 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
765 else
766 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
767
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530768 writel_relaxed(((readl_relaxed(host->ioaddr +
769 msm_host_offset->CORE_DLL_CONFIG_2)
770 & ~(0xFF << 10)) | (mclk_freq << 10)),
771 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800772 /* wait for 5us before enabling DLL clock */
773 udelay(5);
774 }
775
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700776 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530777 writel_relaxed((readl_relaxed(host->ioaddr +
778 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
779 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700780
781 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530782 writel_relaxed((readl_relaxed(host->ioaddr +
783 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
784 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700785
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800786 if (msm_host->use_updated_dll_reset) {
787 msm_cm_dll_set_freq(host);
788 /* Enable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530789 writel_relaxed((readl_relaxed(host->ioaddr +
790 msm_host_offset->CORE_DLL_CONFIG_2)
791 & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
792 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800793 }
794
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700795 /* Set DLL_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530796 writel_relaxed((readl_relaxed(host->ioaddr +
797 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
798 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700799
800 /* Set CK_OUT_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530801 writel_relaxed((readl_relaxed(host->ioaddr +
802 msm_host_offset->CORE_DLL_CONFIG)
803 | CORE_CK_OUT_EN), host->ioaddr +
804 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700805
806 wait_cnt = 50;
807 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530808 while (!(readl_relaxed(host->ioaddr +
809 msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700810 /* max. wait for 50us sec for LOCK bit to be set */
811 if (--wait_cnt == 0) {
812 pr_err("%s: %s: DLL failed to LOCK\n",
813 mmc_hostname(mmc), __func__);
814 rc = -ETIMEDOUT;
815 goto out;
816 }
817 /* wait for 1us before polling again */
818 udelay(1);
819 }
820
821out:
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530822 /* Restore the correct PWRSAVE state */
823 if (prev_pwrsave ^ curr_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530824 u32 reg = readl_relaxed(host->ioaddr +
825 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530826
827 if (prev_pwrsave)
828 reg |= CORE_CLK_PWRSAVE;
829 else
830 reg &= ~CORE_CLK_PWRSAVE;
831
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530832 writel_relaxed(reg, host->ioaddr +
833 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530834 }
835
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700836 spin_unlock_irqrestore(&host->lock, flags);
837 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
838 return rc;
839}
840
/*
 * sdhci_msm_cdclp533_calibration - run the CDCLP533 CDC DLL HW calibration
 * @host: SDHCI host whose Calibrated Delay Circuit is to be calibrated
 *
 * Programs the CDC configuration registers and triggers the hardware
 * auto-calibration used for HS400 mode (invoked from
 * sdhci_msm_hs400_dll_calibration() when msm_host->use_cdclp533 is set).
 * The register writes below form a fixed hardware sequence; do not reorder.
 *
 * Return: 0 on success, -ETIMEDOUT if CALIBRATION_DONE is not set within
 * 50us, -EINVAL if the CDC reports a non-zero error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Ensure all the relaxed writes above reach HW before polling */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
953
/*
 * sdhci_msm_cm_dll_sdc4_calibration - run the CM_DLL_SDC4 DDR calibration
 * @host: SDHCI host to calibrate
 *
 * Restores the DDR config registers (in case the bootloader changed them),
 * optionally enables CMDIN_RCLK_EN for enhanced-strobe capable cards, kicks
 * off DDR calibration and polls for DDR_DLL_LOCK. Called from the HS400
 * calibration and enhanced-strobe paths.
 *
 * Return: 0 on success, -ETIMEDOUT if DDR_DLL_LOCK is not set within 1ms.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		/* HW with the RCLK delay fix takes the power-on-reset value */
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		/* Otherwise patch the PRG_RCLK_DLY field into the POR value */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	/* Route RCLK on the CMD line for enhanced-strobe capable cards */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN), host->ioaddr +
				msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		 msm_host_offset->CORE_DLL_STATUS,
		 dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				 mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC3);
	/* Make sure the above write lands before any further HW access */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1022
Ritesh Harjaniea709662015-05-27 15:40:24 +05301023static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
1024{
1025 int ret = 0;
1026 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1027 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1028 struct mmc_host *mmc = host->mmc;
1029
1030 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
1031
Ritesh Harjani70e2a712015-08-25 11:34:16 +05301032 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
1033 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +05301034 mmc_hostname(mmc));
1035 return -EINVAL;
1036 }
1037
1038 if (msm_host->calibration_done ||
1039 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
1040 return 0;
1041 }
1042
1043 /*
1044 * Reset the tuning block.
1045 */
1046 ret = msm_init_cm_dll(host);
1047 if (ret)
1048 goto out;
1049
1050 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
1051out:
1052 if (!ret)
1053 msm_host->calibration_done = true;
1054 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
1055 __func__, ret);
1056 return ret;
1057}
1058
/*
 * sdhci_msm_hs400_dll_calibration - re-calibrate the DLL for HS400 (DDR) mode
 * @host: SDHCI host to calibrate
 *
 * Retuning in HS400 (DDR mode) would fail, so this instead resets the
 * tuning block, restores the tuning phase saved by
 * sdhci_msm_execute_tuning(), and runs the HW calibration for whichever
 * delay circuit this controller uses (CDCLP533 or CM_DLL_SDC4).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1099
Krishna Konda96e6b112013-10-28 15:25:03 -07001100static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
1101 u8 drv_type)
1102{
1103 struct mmc_command cmd = {0};
1104 struct mmc_request mrq = {NULL};
1105 struct mmc_host *mmc = host->mmc;
1106 u8 val = ((drv_type << 4) | 2);
1107
1108 cmd.opcode = MMC_SWITCH;
1109 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1110 (EXT_CSD_HS_TIMING << 16) |
1111 (val << 8) |
1112 EXT_CSD_CMD_SET_NORMAL;
1113 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
1114 /* 1 sec */
1115 cmd.busy_timeout = 1000 * 1000;
1116
1117 memset(cmd.resp, 0, sizeof(cmd.resp));
1118 cmd.retries = 3;
1119
1120 mrq.cmd = &cmd;
1121 cmd.data = NULL;
1122
1123 mmc_wait_for_req(mmc, &mrq);
1124 pr_debug("%s: %s: set card drive type to %d\n",
1125 mmc_hostname(mmc), __func__,
1126 drv_type);
1127}
1128
/*
 * sdhci_msm_execute_tuning - find and program the best CM DLL tuning phase
 * @host: SDHCI host to tune
 * @opcode: tuning command opcode (e.g. MMC_SEND_TUNING_BLOCK_HS200)
 *
 * Runs only for SDR104/HS200/HS400 above 100MHz; otherwise returns 0. For
 * HS400 with tuning already done it only (re)runs the DLL HW calibration.
 * Otherwise it steps through all 16 DLL phases, reading the tuning block
 * pattern at each, and programs the most appropriate passing phase (which
 * is also saved for later HS400 calibration). If an eMMC card passes on all
 * 16 phases, the card drive strength is varied and tuning repeated until
 * some phase fails. The whole sequence is retried up to 3 times.
 *
 * Return: 0 on success (or when tuning is not required), negative errno on
 * failure (-EIO when no passing phase is found).
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 tuning uses the wider 128-byte pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a failed read, poll the card (CMD13) back into TRAN
		 * state before trying the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
				tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remember the phase so HS400 calibration can restore it */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1343
Asutosh Das0ef24812012-12-18 16:14:02 +05301344static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1345{
1346 struct sdhci_msm_gpio_data *curr;
1347 int i, ret = 0;
1348
1349 curr = pdata->pin_data->gpio_data;
1350 for (i = 0; i < curr->size; i++) {
1351 if (!gpio_is_valid(curr->gpio[i].no)) {
1352 ret = -EINVAL;
1353 pr_err("%s: Invalid gpio = %d\n", __func__,
1354 curr->gpio[i].no);
1355 goto free_gpios;
1356 }
1357 if (enable) {
1358 ret = gpio_request(curr->gpio[i].no,
1359 curr->gpio[i].name);
1360 if (ret) {
1361 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1362 __func__, curr->gpio[i].no,
1363 curr->gpio[i].name, ret);
1364 goto free_gpios;
1365 }
1366 curr->gpio[i].is_enabled = true;
1367 } else {
1368 gpio_free(curr->gpio[i].no);
1369 curr->gpio[i].is_enabled = false;
1370 }
1371 }
1372 return ret;
1373
1374free_gpios:
1375 for (i--; i >= 0; i--) {
1376 gpio_free(curr->gpio[i].no);
1377 curr->gpio[i].is_enabled = false;
1378 }
1379 return ret;
1380}
1381
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301382static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1383 bool enable)
1384{
1385 int ret = 0;
1386
1387 if (enable)
1388 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1389 pdata->pctrl_data->pins_active);
1390 else
1391 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1392 pdata->pctrl_data->pins_sleep);
1393
1394 if (ret < 0)
1395 pr_err("%s state for pinctrl failed with %d\n",
1396 enable ? "Enabling" : "Disabling", ret);
1397
1398 return ret;
1399}
1400
Asutosh Das0ef24812012-12-18 16:14:02 +05301401static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1402{
1403 int ret = 0;
1404
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301405 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301406 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301407 } else if (pdata->pctrl_data) {
1408 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1409 goto out;
1410 } else if (!pdata->pin_data) {
1411 return 0;
1412 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301413
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301414 if (pdata->pin_data->is_gpio)
1415 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301416out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301417 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301418 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301419
1420 return ret;
1421}
1422
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301423static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1424 u32 **out, int *len, u32 size)
1425{
1426 int ret = 0;
1427 struct device_node *np = dev->of_node;
1428 size_t sz;
1429 u32 *arr = NULL;
1430
1431 if (!of_get_property(np, prop_name, len)) {
1432 ret = -EINVAL;
1433 goto out;
1434 }
1435 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001436 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301437 dev_err(dev, "%s invalid size\n", prop_name);
1438 ret = -EINVAL;
1439 goto out;
1440 }
1441
1442 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1443 if (!arr) {
1444 dev_err(dev, "%s failed allocating memory\n", prop_name);
1445 ret = -ENOMEM;
1446 goto out;
1447 }
1448
1449 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1450 if (ret < 0) {
1451 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1452 goto out;
1453 }
1454 *out = arr;
1455out:
1456 if (ret)
1457 *len = 0;
1458 return ret;
1459}
1460
Asutosh Das0ef24812012-12-18 16:14:02 +05301461#define MAX_PROP_SIZE 32
1462static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1463 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1464{
1465 int len, ret = 0;
1466 const __be32 *prop;
1467 char prop_name[MAX_PROP_SIZE];
1468 struct sdhci_msm_reg_data *vreg;
1469 struct device_node *np = dev->of_node;
1470
1471 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1472 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301473 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301474 return ret;
1475 }
1476
1477 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1478 if (!vreg) {
1479 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1480 ret = -ENOMEM;
1481 return ret;
1482 }
1483
1484 vreg->name = vreg_name;
1485
1486 snprintf(prop_name, MAX_PROP_SIZE,
1487 "qcom,%s-always-on", vreg_name);
1488 if (of_get_property(np, prop_name, NULL))
1489 vreg->is_always_on = true;
1490
1491 snprintf(prop_name, MAX_PROP_SIZE,
1492 "qcom,%s-lpm-sup", vreg_name);
1493 if (of_get_property(np, prop_name, NULL))
1494 vreg->lpm_sup = true;
1495
1496 snprintf(prop_name, MAX_PROP_SIZE,
1497 "qcom,%s-voltage-level", vreg_name);
1498 prop = of_get_property(np, prop_name, &len);
1499 if (!prop || (len != (2 * sizeof(__be32)))) {
1500 dev_warn(dev, "%s %s property\n",
1501 prop ? "invalid format" : "no", prop_name);
1502 } else {
1503 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1504 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1505 }
1506
1507 snprintf(prop_name, MAX_PROP_SIZE,
1508 "qcom,%s-current-level", vreg_name);
1509 prop = of_get_property(np, prop_name, &len);
1510 if (!prop || (len != (2 * sizeof(__be32)))) {
1511 dev_warn(dev, "%s %s property\n",
1512 prop ? "invalid format" : "no", prop_name);
1513 } else {
1514 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1515 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1516 }
1517
1518 *vreg_data = vreg;
1519 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1520 vreg->name, vreg->is_always_on ? "always_on," : "",
1521 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1522 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1523
1524 return ret;
1525}
1526
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301527static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1528 struct sdhci_msm_pltfm_data *pdata)
1529{
1530 struct sdhci_pinctrl_data *pctrl_data;
1531 struct pinctrl *pctrl;
1532 int ret = 0;
1533
1534 /* Try to obtain pinctrl handle */
1535 pctrl = devm_pinctrl_get(dev);
1536 if (IS_ERR(pctrl)) {
1537 ret = PTR_ERR(pctrl);
1538 goto out;
1539 }
1540 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1541 if (!pctrl_data) {
1542 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1543 ret = -ENOMEM;
1544 goto out;
1545 }
1546 pctrl_data->pctrl = pctrl;
1547 /* Look-up and keep the states handy to be used later */
1548 pctrl_data->pins_active = pinctrl_lookup_state(
1549 pctrl_data->pctrl, "active");
1550 if (IS_ERR(pctrl_data->pins_active)) {
1551 ret = PTR_ERR(pctrl_data->pins_active);
1552 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1553 goto out;
1554 }
1555 pctrl_data->pins_sleep = pinctrl_lookup_state(
1556 pctrl_data->pctrl, "sleep");
1557 if (IS_ERR(pctrl_data->pins_sleep)) {
1558 ret = PTR_ERR(pctrl_data->pins_sleep);
1559 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1560 goto out;
1561 }
1562 pdata->pctrl_data = pctrl_data;
1563out:
1564 return ret;
1565}
1566
Asutosh Das0ef24812012-12-18 16:14:02 +05301567#define GPIO_NAME_MAX_LEN 32
1568static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1569 struct sdhci_msm_pltfm_data *pdata)
1570{
1571 int ret = 0, cnt, i;
1572 struct sdhci_msm_pin_data *pin_data;
1573 struct device_node *np = dev->of_node;
1574
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301575 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1576 if (!ret) {
1577 goto out;
1578 } else if (ret == -EPROBE_DEFER) {
1579 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1580 goto out;
1581 } else {
1582 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1583 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301584 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301585 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301586 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1587 if (!pin_data) {
1588 dev_err(dev, "No memory for pin_data\n");
1589 ret = -ENOMEM;
1590 goto out;
1591 }
1592
1593 cnt = of_gpio_count(np);
1594 if (cnt > 0) {
1595 pin_data->gpio_data = devm_kzalloc(dev,
1596 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1597 if (!pin_data->gpio_data) {
1598 dev_err(dev, "No memory for gpio_data\n");
1599 ret = -ENOMEM;
1600 goto out;
1601 }
1602 pin_data->gpio_data->size = cnt;
1603 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1604 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1605
1606 if (!pin_data->gpio_data->gpio) {
1607 dev_err(dev, "No memory for gpio\n");
1608 ret = -ENOMEM;
1609 goto out;
1610 }
1611
1612 for (i = 0; i < cnt; i++) {
1613 const char *name = NULL;
1614 char result[GPIO_NAME_MAX_LEN];
1615 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1616 of_property_read_string_index(np,
1617 "qcom,gpio-names", i, &name);
1618
1619 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1620 dev_name(dev), name ? name : "?");
1621 pin_data->gpio_data->gpio[i].name = result;
1622 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1623 pin_data->gpio_data->gpio[i].name,
1624 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301625 }
1626 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301627 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301628out:
1629 if (ret)
1630 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1631 return ret;
1632}
1633
#ifdef CONFIG_SMP
/*
 * On SMP kernels the PM QoS request can follow the CPU that actually
 * services the SDHC interrupt (PM_QOS_REQ_AFFINE_IRQ).
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
/* On UP kernels IRQ affinity is meaningless; keep the default request type. */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1642
Gilad Bronerc788a672015-09-08 15:39:11 +03001643static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1644 struct sdhci_msm_pltfm_data *pdata)
1645{
1646 struct device_node *np = dev->of_node;
1647 const char *str;
1648 u32 cpu;
1649 int ret = 0;
1650 int i;
1651
1652 pdata->pm_qos_data.irq_valid = false;
1653 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1654 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1655 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001656 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001657 }
1658
1659 /* must specify cpu for "affine_cores" type */
1660 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1661 pdata->pm_qos_data.irq_cpu = -1;
1662 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1663 if (ret) {
1664 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1665 ret);
1666 goto out;
1667 }
1668 if (cpu < 0 || cpu >= num_possible_cpus()) {
1669 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1670 __func__, cpu, num_possible_cpus());
1671 ret = -EINVAL;
1672 goto out;
1673 }
1674 pdata->pm_qos_data.irq_cpu = cpu;
1675 }
1676
1677 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1678 SDHCI_POWER_POLICY_NUM) {
1679 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1680 __func__, SDHCI_POWER_POLICY_NUM);
1681 ret = -EINVAL;
1682 goto out;
1683 }
1684
1685 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1686 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1687 &pdata->pm_qos_data.irq_latency.latency[i]);
1688
1689 pdata->pm_qos_data.irq_valid = true;
1690out:
1691 return ret;
1692}
1693
1694static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1695 struct sdhci_msm_pltfm_data *pdata)
1696{
1697 struct device_node *np = dev->of_node;
1698 u32 mask;
1699 int nr_groups;
1700 int ret;
1701 int i;
1702
1703 /* Read cpu group mapping */
1704 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1705 if (nr_groups <= 0) {
1706 ret = -EINVAL;
1707 goto out;
1708 }
1709 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1710 pdata->pm_qos_data.cpu_group_map.mask =
1711 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1712 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1713 ret = -ENOMEM;
1714 goto out;
1715 }
1716
1717 for (i = 0; i < nr_groups; i++) {
1718 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1719 i, &mask);
1720
1721 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1722 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1723 cpu_possible_mask)) {
1724 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1725 __func__, mask, i);
1726 ret = -EINVAL;
1727 goto free_res;
1728 }
1729 }
1730 return 0;
1731
1732free_res:
1733 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1734out:
1735 return ret;
1736}
1737
1738static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1739 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1740{
1741 struct device_node *np = dev->of_node;
1742 struct sdhci_msm_pm_qos_latency *values;
1743 int ret;
1744 int i;
1745 int group;
1746 int cfg;
1747
1748 ret = of_property_count_u32_elems(np, name);
1749 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1750 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1751 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1752 ret);
1753 return -EINVAL;
1754 } else if (ret < 0) {
1755 return ret;
1756 }
1757
1758 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1759 GFP_KERNEL);
1760 if (!values)
1761 return -ENOMEM;
1762
1763 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1764 group = i / SDHCI_POWER_POLICY_NUM;
1765 cfg = i % SDHCI_POWER_POLICY_NUM;
1766 of_property_read_u32_index(np, name, i,
1767 &(values[group].latency[cfg]));
1768 }
1769
1770 *latency = values;
1771 return 0;
1772}
1773
1774static void sdhci_msm_pm_qos_parse(struct device *dev,
1775 struct sdhci_msm_pltfm_data *pdata)
1776{
1777 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1778 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1779 __func__);
1780
1781 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1782 pdata->pm_qos_data.cmdq_valid =
1783 !sdhci_msm_pm_qos_parse_latency(dev,
1784 "qcom,pm-qos-cmdq-latency-us",
1785 pdata->pm_qos_data.cpu_group_map.nr_groups,
1786 &pdata->pm_qos_data.cmdq_latency);
1787 pdata->pm_qos_data.legacy_valid =
1788 !sdhci_msm_pm_qos_parse_latency(dev,
1789 "qcom,pm-qos-legacy-latency-us",
1790 pdata->pm_qos_data.cpu_group_map.nr_groups,
1791 &pdata->pm_qos_data.latency);
1792 if (!pdata->pm_qos_data.cmdq_valid &&
1793 !pdata->pm_qos_data.legacy_valid) {
1794 /* clean-up previously allocated arrays */
1795 kfree(pdata->pm_qos_data.latency);
1796 kfree(pdata->pm_qos_data.cmdq_latency);
1797 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1798 __func__);
1799 }
1800 } else {
1801 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1802 __func__);
1803 }
1804}
1805
/* Parse platform data */
/*
 * Build the driver's platform data from the device-tree node: card
 * detect GPIO, bus width, supported clock rates, clock-scaling tables,
 * regulator (vdd / vdd-io) descriptions, pin configuration, supported
 * bus-speed modes and assorted capability flags.
 *
 * Returns the populated sdhci_msm_pltfm_data, or NULL if any mandatory
 * property fails to parse (allocations are device-managed, so nothing
 * needs freeing on the failure path).
 */
static
struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
						struct sdhci_msm_host *msm_host)
{
	struct sdhci_msm_pltfm_data *pdata = NULL;
	struct device_node *np = dev->of_node;
	u32 bus_width = 0;
	int len, i;
	int clk_table_len;
	u32 *clk_table = NULL;
	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
	const char *lower_bus_speed = NULL;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "failed to allocate memory for platform data\n");
		goto out;
	}

	/* Card-detect GPIO; polarity is taken from the DT flags cell. */
	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;

	of_property_read_u32(np, "qcom,bus-width", &bus_width);
	if (bus_width == 8)
		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
	else if (bus_width == 4)
		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
	else {
		dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
		pdata->mmc_bus_width = 0;
	}

	/* Optional devfreq clock-scaling frequency table. */
	if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
			&msm_host->mmc->clk_scaling.pltfm_freq_table,
			&msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
		pr_debug("%s: no clock scaling frequencies were supplied\n",
			dev_name(dev));
	else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
			!msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
		dev_err(dev, "bad dts clock scaling frequencies\n");

	/*
	 * Few hosts can support DDR52 mode at the same lower
	 * system voltage corner as high-speed mode. In such cases,
	 * it is always better to put it in DDR mode which will
	 * improve the performance without any power impact.
	 */
	if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
			&lower_bus_speed)) {
		if (!strcmp(lower_bus_speed, "DDR52"))
			msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
				MMC_SCALING_LOWER_DDR52_MODE;
	}

	/* Supported controller clock rates — mandatory. */
	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
			&clk_table, &clk_table_len, 0)) {
		dev_err(dev, "failed parsing supported clock rates\n");
		goto out;
	}
	if (!clk_table || !clk_table_len) {
		dev_err(dev, "Invalid clock table\n");
		goto out;
	}
	pdata->sup_clk_table = clk_table;
	pdata->sup_clk_cnt = clk_table_len;

	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
						    sdhci_msm_slot_reg_data),
					GFP_KERNEL);
	if (!pdata->vreg_data) {
		dev_err(dev, "failed to allocate memory for vreg data\n");
		goto out;
	}

	/* Slot supplies: vdd (card) and vdd-io (I/O pads) — both mandatory. */
	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
					 "vdd")) {
		dev_err(dev, "failed parsing vdd data\n");
		goto out;
	}
	if (sdhci_msm_dt_parse_vreg_info(dev,
					 &pdata->vreg_data->vdd_io_data,
					 "vdd-io")) {
		dev_err(dev, "failed parsing vdd-io data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
		dev_err(dev, "failed parsing gpio data\n");
		goto out;
	}

	/* Translate each listed bus-speed mode into MMC capability bits. */
	len = of_property_count_strings(np, "qcom,bus-speed-mode");

	for (i = 0; i < len; i++) {
		const char *name = NULL;

		of_property_read_string_index(np,
			"qcom,bus-speed-mode", i, &name);
		if (!name)
			continue;

		if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
		else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
			pdata->caps |= MMC_CAP_1_8V_DDR
					| MMC_CAP_UHS_DDR50;
		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
			pdata->caps |= MMC_CAP_1_2V_DDR
					| MMC_CAP_UHS_DDR50;
	}

	if (of_get_property(np, "qcom,nonremovable", NULL))
		pdata->nonremovable = true;

	if (of_get_property(np, "qcom,nonhotplug", NULL))
		pdata->nonhotplug = true;

	pdata->largeaddressbus =
		of_property_read_bool(np, "qcom,large-address-bus");

	if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
		msm_host->mmc->wakeup_on_idle = true;

	sdhci_msm_pm_qos_parse(dev, pdata);

	if (of_get_property(np, "qcom,core_3_0v_support", NULL))
		msm_host->core_3_0v_support = true;

	pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
	msm_host->regs_restore.is_supported =
		of_property_read_bool(np, "qcom,restore-after-cx-collapse");

	return pdata;
out:
	return NULL;
}
1950
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301951/* Returns required bandwidth in Bytes per Sec */
1952static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1953 struct mmc_ios *ios)
1954{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301955 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1956 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1957
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301958 unsigned int bw;
1959
Sahitya Tummala2886c922013-04-03 18:03:31 +05301960 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301961 /*
1962 * For DDR mode, SDCC controller clock will be at
1963 * the double rate than the actual clock that goes to card.
1964 */
1965 if (ios->bus_width == MMC_BUS_WIDTH_4)
1966 bw /= 2;
1967 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1968 bw /= 8;
1969
1970 return bw;
1971}
1972
1973static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1974 unsigned int bw)
1975{
1976 unsigned int *table = host->pdata->voting_data->bw_vecs;
1977 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1978 int i;
1979
1980 if (host->msm_bus_vote.is_max_bw_needed && bw)
1981 return host->msm_bus_vote.max_bw_vote;
1982
1983 for (i = 0; i < size; i++) {
1984 if (bw <= table[i])
1985 break;
1986 }
1987
1988 if (i && (i == size))
1989 i--;
1990
1991 return i;
1992}
1993
/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		/*
		 * The bus-scale update may block, so the caller's spinlock
		 * is dropped around the call and re-acquired afterwards;
		 * flags is passed by pointer for exactly this reason.
		 */
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* Only cache the vote once the request actually succeeded. */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
2024
2025/*
2026 * Internal work. Work to set 0 bandwidth for msm bus.
2027 */
2028static void sdhci_msm_bus_work(struct work_struct *work)
2029{
2030 struct sdhci_msm_host *msm_host;
2031 struct sdhci_host *host;
2032 unsigned long flags;
2033
2034 msm_host = container_of(work, struct sdhci_msm_host,
2035 msm_bus_vote.vote_work.work);
2036 host = platform_get_drvdata(msm_host->pdev);
2037
2038 if (!msm_host->msm_bus_vote.client_handle)
2039 return;
2040
2041 spin_lock_irqsave(&host->lock, flags);
2042 /* don't vote for 0 bandwidth if any request is in progress */
2043 if (!host->mrq) {
2044 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302045 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302046 } else
2047 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
2048 mmc_hostname(host->mmc), __func__);
2049 spin_unlock_irqrestore(&host->lock, flags);
2050}
2051
2052/*
2053 * This function cancels any scheduled delayed work and sets the bus
2054 * vote based on bw (bandwidth) argument.
2055 */
2056static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2057 unsigned int bw)
2058{
2059 int vote;
2060 unsigned long flags;
2061 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2062 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2063
2064 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2065 spin_lock_irqsave(&host->lock, flags);
2066 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302067 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302068 spin_unlock_irqrestore(&host->lock, flags);
2069}
2070
2071#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2072
2073/* This function queues a work which will set the bandwidth requiement to 0 */
2074static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2075{
2076 unsigned long flags;
2077 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2078 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2079
2080 spin_lock_irqsave(&host->lock, flags);
2081 if (msm_host->msm_bus_vote.min_bw_vote !=
2082 msm_host->msm_bus_vote.curr_vote)
2083 queue_delayed_work(system_wq,
2084 &msm_host->msm_bus_vote.vote_work,
2085 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2086 spin_unlock_irqrestore(&host->lock, flags);
2087}
2088
2089static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2090 struct platform_device *pdev)
2091{
2092 int rc = 0;
2093 struct msm_bus_scale_pdata *bus_pdata;
2094
2095 struct sdhci_msm_bus_voting_data *data;
2096 struct device *dev = &pdev->dev;
2097
2098 data = devm_kzalloc(dev,
2099 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2100 if (!data) {
2101 dev_err(&pdev->dev,
2102 "%s: failed to allocate memory\n", __func__);
2103 rc = -ENOMEM;
2104 goto out;
2105 }
2106 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2107 if (data->bus_pdata) {
2108 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2109 &data->bw_vecs, &data->bw_vecs_size, 0);
2110 if (rc) {
2111 dev_err(&pdev->dev,
2112 "%s: Failed to get bus-bw-vectors-bps\n",
2113 __func__);
2114 goto out;
2115 }
2116 host->pdata->voting_data = data;
2117 }
2118 if (host->pdata->voting_data &&
2119 host->pdata->voting_data->bus_pdata &&
2120 host->pdata->voting_data->bw_vecs &&
2121 host->pdata->voting_data->bw_vecs_size) {
2122
2123 bus_pdata = host->pdata->voting_data->bus_pdata;
2124 host->msm_bus_vote.client_handle =
2125 msm_bus_scale_register_client(bus_pdata);
2126 if (!host->msm_bus_vote.client_handle) {
2127 dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
2128 rc = -EFAULT;
2129 goto out;
2130 }
2131 /* cache the vote index for minimum and maximum bandwidth */
2132 host->msm_bus_vote.min_bw_vote =
2133 sdhci_msm_bus_get_vote_for_bw(host, 0);
2134 host->msm_bus_vote.max_bw_vote =
2135 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2136 } else {
2137 devm_kfree(dev, data);
2138 }
2139
2140out:
2141 return rc;
2142}
2143
2144static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2145{
2146 if (host->msm_bus_vote.client_handle)
2147 msm_bus_scale_unregister_client(
2148 host->msm_bus_vote.client_handle);
2149}
2150
/*
 * Vote for (enable != 0) or schedule removal of (enable == 0) the bus
 * bandwidth appropriate for the current ios settings.  No-op when the
 * host never registered a bus-scaling client.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
2179
Asutosh Das0ef24812012-12-18 16:14:02 +05302180/* Regulator utility functions */
2181static int sdhci_msm_vreg_init_reg(struct device *dev,
2182 struct sdhci_msm_reg_data *vreg)
2183{
2184 int ret = 0;
2185
2186 /* check if regulator is already initialized? */
2187 if (vreg->reg)
2188 goto out;
2189
2190 /* Get the regulator handle */
2191 vreg->reg = devm_regulator_get(dev, vreg->name);
2192 if (IS_ERR(vreg->reg)) {
2193 ret = PTR_ERR(vreg->reg);
2194 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2195 __func__, vreg->name, ret);
2196 goto out;
2197 }
2198
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302199 if (regulator_count_voltages(vreg->reg) > 0) {
2200 vreg->set_voltage_sup = true;
2201 /* sanity check */
2202 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2203 pr_err("%s: %s invalid constraints specified\n",
2204 __func__, vreg->name);
2205 ret = -EINVAL;
2206 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302207 }
2208
2209out:
2210 return ret;
2211}
2212
2213static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2214{
2215 if (vreg->reg)
2216 devm_regulator_put(vreg->reg);
2217}
2218
2219static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2220 *vreg, int uA_load)
2221{
2222 int ret = 0;
2223
2224 /*
2225 * regulators that do not support regulator_set_voltage also
2226 * do not support regulator_set_optimum_mode
2227 */
2228 if (vreg->set_voltage_sup) {
2229 ret = regulator_set_load(vreg->reg, uA_load);
2230 if (ret < 0)
2231 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2232 __func__, vreg->name, uA_load, ret);
2233 else
2234 /*
2235 * regulator_set_load() can return non zero
2236 * value even for success case.
2237 */
2238 ret = 0;
2239 }
2240 return ret;
2241}
2242
2243static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2244 int min_uV, int max_uV)
2245{
2246 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302247 if (vreg->set_voltage_sup) {
2248 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2249 if (ret) {
2250 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302251 __func__, vreg->name, min_uV, max_uV, ret);
2252 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302253 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302254
2255 return ret;
2256}
2257
2258static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2259{
2260 int ret = 0;
2261
2262 /* Put regulator in HPM (high power mode) */
2263 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2264 if (ret < 0)
2265 return ret;
2266
2267 if (!vreg->is_enabled) {
2268 /* Set voltage level */
2269 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2270 vreg->high_vol_level);
2271 if (ret)
2272 return ret;
2273 }
2274 ret = regulator_enable(vreg->reg);
2275 if (ret) {
2276 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2277 __func__, vreg->name, ret);
2278 return ret;
2279 }
2280 vreg->is_enabled = true;
2281 return ret;
2282}
2283
2284static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2285{
2286 int ret = 0;
2287
2288 /* Never disable regulator marked as always_on */
2289 if (vreg->is_enabled && !vreg->is_always_on) {
2290 ret = regulator_disable(vreg->reg);
2291 if (ret) {
2292 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2293 __func__, vreg->name, ret);
2294 goto out;
2295 }
2296 vreg->is_enabled = false;
2297
2298 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2299 if (ret < 0)
2300 goto out;
2301
2302 /* Set min. voltage level to 0 */
2303 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2304 if (ret)
2305 goto out;
2306 } else if (vreg->is_enabled && vreg->is_always_on) {
2307 if (vreg->lpm_sup) {
2308 /* Put always_on regulator in LPM (low power mode) */
2309 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2310 vreg->lpm_uA);
2311 if (ret < 0)
2312 goto out;
2313 }
2314 }
2315out:
2316 return ret;
2317}
2318
2319static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2320 bool enable, bool is_init)
2321{
2322 int ret = 0, i;
2323 struct sdhci_msm_slot_reg_data *curr_slot;
2324 struct sdhci_msm_reg_data *vreg_table[2];
2325
2326 curr_slot = pdata->vreg_data;
2327 if (!curr_slot) {
2328 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2329 __func__);
2330 goto out;
2331 }
2332
2333 vreg_table[0] = curr_slot->vdd_data;
2334 vreg_table[1] = curr_slot->vdd_io_data;
2335
2336 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2337 if (vreg_table[i]) {
2338 if (enable)
2339 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2340 else
2341 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2342 if (ret)
2343 goto out;
2344 }
2345 }
2346out:
2347 return ret;
2348}
2349
Asutosh Das0ef24812012-12-18 16:14:02 +05302350/* This init function should be called only once for each SDHC slot */
2351static int sdhci_msm_vreg_init(struct device *dev,
2352 struct sdhci_msm_pltfm_data *pdata,
2353 bool is_init)
2354{
2355 int ret = 0;
2356 struct sdhci_msm_slot_reg_data *curr_slot;
2357 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2358
2359 curr_slot = pdata->vreg_data;
2360 if (!curr_slot)
2361 goto out;
2362
2363 curr_vdd_reg = curr_slot->vdd_data;
2364 curr_vdd_io_reg = curr_slot->vdd_io_data;
2365
2366 if (!is_init)
2367 /* Deregister all regulators from regulator framework */
2368 goto vdd_io_reg_deinit;
2369
2370 /*
2371 * Get the regulator handle from voltage regulator framework
2372 * and then try to set the voltage level for the regulator
2373 */
2374 if (curr_vdd_reg) {
2375 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2376 if (ret)
2377 goto out;
2378 }
2379 if (curr_vdd_io_reg) {
2380 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2381 if (ret)
2382 goto vdd_reg_deinit;
2383 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302384
Asutosh Das0ef24812012-12-18 16:14:02 +05302385 if (ret)
2386 dev_err(dev, "vreg reset failed (%d)\n", ret);
2387 goto out;
2388
2389vdd_io_reg_deinit:
2390 if (curr_vdd_io_reg)
2391 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2392vdd_reg_deinit:
2393 if (curr_vdd_reg)
2394 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2395out:
2396 return ret;
2397}
2398
2399
2400static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2401 enum vdd_io_level level,
2402 unsigned int voltage_level)
2403{
2404 int ret = 0;
2405 int set_level;
2406 struct sdhci_msm_reg_data *vdd_io_reg;
2407
2408 if (!pdata->vreg_data)
2409 return ret;
2410
2411 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2412 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2413 switch (level) {
2414 case VDD_IO_LOW:
2415 set_level = vdd_io_reg->low_vol_level;
2416 break;
2417 case VDD_IO_HIGH:
2418 set_level = vdd_io_reg->high_vol_level;
2419 break;
2420 case VDD_IO_SET_LEVEL:
2421 set_level = voltage_level;
2422 break;
2423 default:
2424 pr_err("%s: invalid argument level = %d",
2425 __func__, level);
2426 ret = -EINVAL;
2427 return ret;
2428 }
2429 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2430 set_level);
2431 }
2432 return ret;
2433}
2434
Ritesh Harjani42876f42015-11-17 17:46:51 +05302435/*
2436 * Acquire spin-lock host->lock before calling this function
2437 */
2438static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2439 bool enable)
2440{
2441 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2442 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2443
2444 if (enable && !msm_host->is_sdiowakeup_enabled)
2445 enable_irq(msm_host->pdata->sdiowakeup_irq);
2446 else if (!enable && msm_host->is_sdiowakeup_enabled)
2447 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2448 else
2449 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2450 __func__, enable, msm_host->is_sdiowakeup_enabled);
2451 msm_host->is_sdiowakeup_enabled = enable;
2452}
2453
/*
 * Handler for the SDIO wakeup GPIO interrupt: disarm the wakeup IRQ and
 * flag that SDIO work is pending for the resume path.
 */
static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned long flags;

	pr_debug("%s: irq (%d) received\n", __func__, irq);

	/* host->lock must be held while reconfiguring the wakeup IRQ */
	spin_lock_irqsave(&host->lock, flags);
	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
	spin_unlock_irqrestore(&host->lock, flags);
	/*
	 * NOTE(review): this flag is set outside the host lock —
	 * presumably consumed by the suspend/resume path; confirm no
	 * stricter ordering is required.
	 */
	msm_host->sdio_pending_processing = true;

	return IRQ_HANDLED;
}
2471
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302472void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2473{
2474 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2475 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302476 const struct sdhci_msm_offset *msm_host_offset =
2477 msm_host->offset;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302478
2479 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2480 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302481 sdhci_msm_readl_relaxed(host,
2482 msm_host_offset->CORE_PWRCTL_STATUS),
2483 sdhci_msm_readl_relaxed(host,
2484 msm_host_offset->CORE_PWRCTL_MASK),
2485 sdhci_msm_readl_relaxed(host,
2486 msm_host_offset->CORE_PWRCTL_CTL));
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302487}
2488
/*
 * Power-control IRQ handler.
 *
 * The SDHC core raises this IRQ to request bus power on/off and IO pad
 * voltage switches. The handler reads the request from PWRCTL_STATUS,
 * performs the regulator/pin/IO-voltage work, and ACKs success or
 * failure back through PWRCTL_CTL. Waiters in
 * sdhci_msm_check_power_status() are released via pwr_irq_completion.
 */
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 irq_status = 0;
	u8 irq_ack = 0;
	int ret = 0;
	int pwr_state = 0, io_level = 0;
	unsigned long flags;
	int retry = 10;

	irq_status = sdhci_msm_readb_relaxed(host,
		msm_host_offset->CORE_PWRCTL_STATUS);

	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, irq_status);

	/* Clear the interrupt */
	sdhci_msm_writeb_relaxed(irq_status, host,
		msm_host_offset->CORE_PWRCTL_CLEAR);

	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status & sdhci_msm_readb_relaxed(host,
		msm_host_offset->CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			/* Unclearable status: dump state and halt hard. */
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
				mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			BUG_ON(1);
		}
		sdhci_msm_writeb_relaxed(irq_status, host,
			msm_host_offset->CORE_PWRCTL_CLEAR);
		retry--;
		udelay(10);
	}
	if (likely(retry < 10))
		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
				mmc_hostname(host->mmc), irq_status, retry);

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		/* Power up regulators, then pins, then raise IO voltage. */
		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_HIGH, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		/* Only touch regulators once platform init has completed. */
		if (msm_host->pltfm_init_done)
			ret = sdhci_msm_setup_vreg(msm_host->pdata,
					false, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_LOW, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		/* Switch voltage Low */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_LOW;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		/* Switch voltage High */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_HIGH;
	}

	/* ACK status to the core */
	sdhci_msm_writeb_relaxed(irq_ack, host,
			msm_host_offset->CORE_PWRCTL_CTL);
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * Flip the IO pad power switch to match the new IO level:
	 * clear it for 3.0V pads (when the controller lacks native 3.0V
	 * support), set it for 1.8V operation.
	 */
	if ((io_level & REQ_IO_HIGH) &&
			(msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
			!msm_host->core_3_0v_support)
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC) &
				~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	else if ((io_level & REQ_IO_LOW) ||
			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC) |
				CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	mb();

	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
	/* Publish the new state and release any waiter under the lock. */
	spin_lock_irqsave(&host->lock, flags);
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;
	complete(&msm_host->pwr_irq_completion);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
2636
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302637static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302638show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2639{
2640 struct sdhci_host *host = dev_get_drvdata(dev);
2641 int poll;
2642 unsigned long flags;
2643
2644 spin_lock_irqsave(&host->lock, flags);
2645 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2646 spin_unlock_irqrestore(&host->lock, flags);
2647
2648 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2649}
2650
2651static ssize_t
2652store_polling(struct device *dev, struct device_attribute *attr,
2653 const char *buf, size_t count)
2654{
2655 struct sdhci_host *host = dev_get_drvdata(dev);
2656 int value;
2657 unsigned long flags;
2658
2659 if (!kstrtou32(buf, 0, &value)) {
2660 spin_lock_irqsave(&host->lock, flags);
2661 if (value) {
2662 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2663 mmc_detect_change(host->mmc, 0);
2664 } else {
2665 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2666 }
2667 spin_unlock_irqrestore(&host->lock, flags);
2668 }
2669 return count;
2670}
2671
2672static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302673show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2674 char *buf)
2675{
2676 struct sdhci_host *host = dev_get_drvdata(dev);
2677 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2678 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2679
2680 return snprintf(buf, PAGE_SIZE, "%u\n",
2681 msm_host->msm_bus_vote.is_max_bw_needed);
2682}
2683
2684static ssize_t
2685store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2686 const char *buf, size_t count)
2687{
2688 struct sdhci_host *host = dev_get_drvdata(dev);
2689 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2690 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2691 uint32_t value;
2692 unsigned long flags;
2693
2694 if (!kstrtou32(buf, 0, &value)) {
2695 spin_lock_irqsave(&host->lock, flags);
2696 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2697 spin_unlock_irqrestore(&host->lock, flags);
2698 }
2699 return count;
2700}
2701
/*
 * Wait for the power IRQ to service a bus power / IO voltage request.
 *
 * @req_type: one of REQ_BUS_ON/REQ_BUS_OFF/REQ_IO_HIGH/REQ_IO_LOW.
 *
 * If the requested state is already current (or the HW can never raise
 * the IRQ for this request), returns immediately; otherwise blocks up to
 * MSM_PWR_IRQ_TIMEOUT_MS on pwr_irq_completion, which is signalled by
 * sdhci_msm_pwr_irq().
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	unsigned long flags;
	bool done = false;
	u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;

	spin_lock_irqsave(&host->lock, flags);
	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);
	/* On legacy (MCI) layouts, read actual signalling capability. */
	if (!msm_host->mci_removed)
		io_sig_sts = sdhci_msm_readl_relaxed(host,
				msm_host_offset->CORE_GENERICS);

	/*
	 * The IRQ for request type IO High/Low will be generated when -
	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
	 * 2. If 1 is true and when there is a state change in 1.8V enable
	 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
	 * layer tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
				((req_type & REQ_IO_HIGH) && !host->pwr)) {
			pr_debug("%s: do not wait for power IRQ that never comes\n",
					mmc_hostname(host->mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
	}

	/* Request already satisfied by the current pwr/io state? */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * This is needed here to handle a case where IRQ gets
	 * triggered even before this function is called so that
	 * x->done counter of completion gets reset. Otherwise,
	 * next call to wait_for_completion returns immediately
	 * without actually waiting for the IRQ to be handled.
	 */
	if (done)
		init_completion(&msm_host->pwr_irq_completion);
	else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
		__WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
				mmc_hostname(host->mmc), req_type);

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
2765
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002766static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2767{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302768 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2769 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2770 const struct sdhci_msm_offset *msm_host_offset =
2771 msm_host->offset;
2772 u32 config = readl_relaxed(host->ioaddr +
2773 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302774
2775 if (enable) {
2776 config |= CORE_CDR_EN;
2777 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302778 writel_relaxed(config, host->ioaddr +
2779 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302780 } else {
2781 config &= ~CORE_CDR_EN;
2782 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302783 writel_relaxed(config, host->ioaddr +
2784 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302785 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002786}
2787
/* Return the fixed scatter-gather segment limit for this controller. */
static unsigned int sdhci_msm_max_segs(void)
{
	return SDHCI_MSM_MAX_SEGMENTS;
}
2792
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302793static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302794{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302795 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2796 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302797
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302798 return msm_host->pdata->sup_clk_table[0];
2799}
2800
2801static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2802{
2803 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2804 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2805 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2806
2807 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2808}
2809
2810static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2811 u32 req_clk)
2812{
2813 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2814 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2815 unsigned int sel_clk = -1;
2816 unsigned char cnt;
2817
2818 if (req_clk < sdhci_msm_get_min_clock(host)) {
2819 sel_clk = sdhci_msm_get_min_clock(host);
2820 return sel_clk;
2821 }
2822
2823 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2824 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2825 break;
2826 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2827 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2828 break;
2829 } else {
2830 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2831 }
2832 }
2833 return sel_clk;
2834}
2835
/*
 * Snapshot vendor and standard SDHCI registers into regs_restore before
 * the controller clocks are gated, so sdhci_msm_registers_restore() can
 * reprogram them on the next clock enable. No-op unless the platform
 * marked register save/restore as supported.
 */
static void sdhci_msm_registers_save(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
			msm_host->offset;

	if (!msm_host->regs_restore.is_supported)
		return;

	msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC);
	msm_host->regs_restore.vendor_pwrctl_mask =
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_PWRCTL_MASK);
	msm_host->regs_restore.vendor_func2 =
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	msm_host->regs_restore.vendor_func3 =
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC3);
	msm_host->regs_restore.hc_2c_2e =
		sdhci_readl(host, SDHCI_CLOCK_CONTROL);
	msm_host->regs_restore.hc_3c_3e =
		sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
	msm_host->regs_restore.vendor_pwrctl_ctl =
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_PWRCTL_CTL);
	msm_host->regs_restore.hc_38_3a =
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
	msm_host->regs_restore.hc_34_36 =
		sdhci_readl(host, SDHCI_INT_ENABLE);
	msm_host->regs_restore.hc_28_2a =
		sdhci_readl(host, SDHCI_HOST_CONTROL);
	msm_host->regs_restore.vendor_caps_0 =
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	msm_host->regs_restore.hc_caps_1 =
		sdhci_readl(host, SDHCI_CAPABILITIES_1);
	msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_TESTBUS_CONFIG);
	/* Mark the snapshot usable for the next restore. */
	msm_host->regs_restore.is_valid = true;

	pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
		mmc_hostname(host->mmc), __func__,
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_PWRCTL_MASK));
}
2884
/*
 * Reprogram the registers captured by sdhci_msm_registers_save() after
 * controller clocks come back on. Consumes the snapshot (is_valid is
 * cleared) so a stale snapshot is never replayed twice. No-op unless
 * save/restore is supported and a valid snapshot exists.
 */
static void sdhci_msm_registers_restore(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
			msm_host->offset;

	if (!msm_host->regs_restore.is_supported ||
		!msm_host->regs_restore.is_valid)
		return;

	writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
			host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
	writel_relaxed(msm_host->regs_restore.vendor_func2,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	writel_relaxed(msm_host->regs_restore.vendor_func3,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
			SDHCI_CLOCK_CONTROL);
	sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
			SDHCI_AUTO_CMD_ERR);
	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
			host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
	sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
			SDHCI_SIGNAL_ENABLE);
	sdhci_writel(host, msm_host->regs_restore.hc_34_36,
			SDHCI_INT_ENABLE);
	sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
			SDHCI_HOST_CONTROL);
	writel_relaxed(msm_host->regs_restore.vendor_caps_0,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
			SDHCI_CAPABILITIES_1);
	writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
			msm_host_offset->CORE_TESTBUS_CONFIG);
	/* Snapshot consumed; require a fresh save before the next restore. */
	msm_host->regs_restore.is_valid = false;

	pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
		mmc_hostname(host->mmc), __func__,
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_PWRCTL_MASK));
}
2932
/*
 * Enable the controller clocks (bus vote, then pclk, then core clk) and
 * restore any saved register state. Idempotent: returns 0 immediately
 * if the controller clock is already on. On failure, unwinds in reverse
 * order via the goto labels. Returns 0 on success or a negative errno.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (atomic_read(&msm_host->controller_clock))
		return 0;

	/* Vote for bus bandwidth before turning clocks on. */
	sdhci_msm_bus_voting(host, 1);

	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
			       mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
		       mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	/* Replay registers saved when the clocks were last gated. */
	sdhci_msm_registers_restore(host);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2975
/*
 * Gate the controller clocks: save register state, disable core clk,
 * pclk and ice_clk, drop the bus vote. No-op if the controller clock
 * is already off.
 *
 * NOTE(review): ice_clk is disabled here but is not enabled by
 * sdhci_msm_enable_controller_clock() in this file section —
 * presumably it is enabled elsewhere (ICE setup path); verify the
 * enable/disable pairing against the full driver.
 */
static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (atomic_read(&msm_host->controller_clock)) {
		/* Capture register state before the clocks go away. */
		sdhci_msm_registers_save(host);
		if (!IS_ERR(msm_host->clk))
			clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR(msm_host->ice_clk))
			clk_disable_unprepare(msm_host->ice_clk);
		sdhci_msm_bus_voting(host, 0);
		atomic_set(&msm_host->controller_clock, 0);
		pr_debug("%s: %s: disabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	}
}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302995
/*
 * Enable or disable the full set of SDHC clocks.
 *
 * @enable=true: vote for the bus, bring up the controller clocks, then
 * bus_clk, ff_clk and sleep_clk in order; any failure unwinds everything
 * already enabled via the goto ladder. @enable=false: stop SDCLK, then
 * tear the optional clocks down in reverse order and gate the controller
 * clock. clks_on tracks the aggregate state. Returns 0 or negative errno.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables have taken effect. */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCI host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);
		sdhci_msm_disable_controller_clock(host);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
3085
3086static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3087{
3088 int rc;
3089 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3090 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303091 const struct sdhci_msm_offset *msm_host_offset =
3092 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003093 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303094 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003095 u32 sup_clock, ddr_clock, dll_lock;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303096 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303097
3098 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303099 /*
3100 * disable pwrsave to ensure clock is not auto-gated until
3101 * the rate is >400KHz (initialization complete).
3102 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303103 writel_relaxed(readl_relaxed(host->ioaddr +
3104 msm_host_offset->CORE_VENDOR_SPEC) &
3105 ~CORE_CLK_PWRSAVE, host->ioaddr +
3106 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303107 sdhci_msm_prepare_clocks(host, false);
3108 host->clock = clock;
3109 goto out;
3110 }
3111
3112 rc = sdhci_msm_prepare_clocks(host, true);
3113 if (rc)
3114 goto out;
3115
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303116 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3117 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303118 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003119 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303120 writel_relaxed(readl_relaxed(host->ioaddr +
3121 msm_host_offset->CORE_VENDOR_SPEC)
3122 | CORE_CLK_PWRSAVE, host->ioaddr +
3123 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303124 /*
3125 * Disable pwrsave for a newly added card if doesn't allow clock
3126 * gating.
3127 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003128 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303129 writel_relaxed(readl_relaxed(host->ioaddr +
3130 msm_host_offset->CORE_VENDOR_SPEC)
3131 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3132 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303133
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303134 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003135 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003136 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003137 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303138 /*
3139 * The SDHC requires internal clock frequency to be double the
3140 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003141 * uses the faster clock(100/400MHz) for some of its parts and
3142 * send the actual required clock (50/200MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303143 */
3144 ddr_clock = clock * 2;
3145 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3146 ddr_clock);
3147 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003148
3149 /*
3150 * In general all timing modes are controlled via UHS mode select in
3151 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
3152 * their respective modes defined here, hence we use these values.
3153 *
3154 * HS200 - SDR104 (Since they both are equivalent in functionality)
3155 * HS400 - This involves multiple configurations
3156 * Initially SDR104 - when tuning is required as HS200
3157 * Then when switching to DDR @ 400MHz (HS400) we use
3158 * the vendor specific HC_SELECT_IN to control the mode.
3159 *
3160 * In addition to controlling the modes we also need to select the
3161 * correct input clock for DLL depending on the mode.
3162 *
3163 * HS400 - divided clock (free running MCLK/2)
3164 * All other modes - default (free running MCLK)
3165 */
3166 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3167 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303168 writel_relaxed(((readl_relaxed(host->ioaddr +
3169 msm_host_offset->CORE_VENDOR_SPEC)
3170 & ~CORE_HC_MCLK_SEL_MASK)
3171 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3172 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003173 /*
3174 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3175 * register
3176 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303177 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003178 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303179 msm_host->enhanced_strobe)) &&
3180 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003181 /*
3182 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3183 * field in VENDOR_SPEC_FUNC
3184 */
3185 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303186 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003187 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303188 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3189 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003190 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003191 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3192 /*
3193 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3194 * CORE_DLL_STATUS to be set. This should get set
3195 * with in 15 us at 200 MHz.
3196 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303197 rc = readl_poll_timeout(host->ioaddr +
3198 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003199 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3200 CORE_DDR_DLL_LOCK)), 10, 1000);
3201 if (rc == -ETIMEDOUT)
3202 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3203 mmc_hostname(host->mmc),
3204 dll_lock);
3205 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003206 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003207 if (!msm_host->use_cdclp533)
3208 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3209 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303210 msm_host_offset->CORE_VENDOR_SPEC3)
3211 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3212 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003213
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003214 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303215 writel_relaxed(((readl_relaxed(host->ioaddr +
3216 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003217 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303218 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3219 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003220
3221 /*
3222 * Disable HC_SELECT_IN to be able to use the UHS mode select
3223 * configuration from Host Control2 register for all other
3224 * modes.
3225 *
3226 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3227 * in VENDOR_SPEC_FUNC
3228 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303229 writel_relaxed((readl_relaxed(host->ioaddr +
3230 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003231 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303232 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3233 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003234 }
3235 mb();
3236
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303237 if (sup_clock != msm_host->clk_rate) {
3238 pr_debug("%s: %s: setting clk rate to %u\n",
3239 mmc_hostname(host->mmc), __func__, sup_clock);
3240 rc = clk_set_rate(msm_host->clk, sup_clock);
3241 if (rc) {
3242 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3243 mmc_hostname(host->mmc), __func__,
3244 sup_clock, rc);
3245 goto out;
3246 }
3247 msm_host->clk_rate = sup_clock;
3248 host->clock = clock;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303249 /*
3250 * Update the bus vote in case of frequency change due to
3251 * clock scaling.
3252 */
3253 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303254 }
3255out:
3256 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303257}
3258
Sahitya Tummala14613432013-03-21 11:13:25 +05303259static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3260 unsigned int uhs)
3261{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003262 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3263 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303264 const struct sdhci_msm_offset *msm_host_offset =
3265 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303266 u16 ctrl_2;
3267
3268 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3269 /* Select Bus Speed Mode for host */
3270 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003271 if ((uhs == MMC_TIMING_MMC_HS400) ||
3272 (uhs == MMC_TIMING_MMC_HS200) ||
3273 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303274 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3275 else if (uhs == MMC_TIMING_UHS_SDR12)
3276 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3277 else if (uhs == MMC_TIMING_UHS_SDR25)
3278 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3279 else if (uhs == MMC_TIMING_UHS_SDR50)
3280 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003281 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3282 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303283 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303284 /*
3285 * When clock frquency is less than 100MHz, the feedback clock must be
3286 * provided and DLL must not be used so that tuning can be skipped. To
3287 * provide feedback clock, the mode selection can be any value less
3288 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3289 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003290 if (host->clock <= CORE_FREQ_100MHZ) {
3291 if ((uhs == MMC_TIMING_MMC_HS400) ||
3292 (uhs == MMC_TIMING_MMC_HS200) ||
3293 (uhs == MMC_TIMING_UHS_SDR104))
3294 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303295
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003296 /*
3297 * Make sure DLL is disabled when not required
3298 *
3299 * Write 1 to DLL_RST bit of DLL_CONFIG register
3300 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303301 writel_relaxed((readl_relaxed(host->ioaddr +
3302 msm_host_offset->CORE_DLL_CONFIG)
3303 | CORE_DLL_RST), host->ioaddr +
3304 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003305
3306 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303307 writel_relaxed((readl_relaxed(host->ioaddr +
3308 msm_host_offset->CORE_DLL_CONFIG)
3309 | CORE_DLL_PDN), host->ioaddr +
3310 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003311 mb();
3312
3313 /*
3314 * The DLL needs to be restored and CDCLP533 recalibrated
3315 * when the clock frequency is set back to 400MHz.
3316 */
3317 msm_host->calibration_done = false;
3318 }
3319
3320 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3321 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303322 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3323
3324}
3325
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003326#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003327#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303328static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003329{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303330 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303331 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3332 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303333 const struct sdhci_msm_offset *msm_host_offset =
3334 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303335 struct cmdq_host *cq_host = host->cq_host;
3336
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303337 u32 version = sdhci_msm_readl_relaxed(host,
3338 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003339 u16 minor = version & CORE_VERSION_TARGET_MASK;
3340 /* registers offset changed starting from 4.2.0 */
3341 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3342
3343 pr_err("---- Debug RAM dump ----\n");
3344 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3345 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3346 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3347
3348 while (i < 16) {
3349 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3350 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3351 i++;
3352 }
3353 pr_err("-------------------------\n");
3354}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303355
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303356static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3357{
3358 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3359 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3360 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3361
3362 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3363 sizeof(struct mmc_host));
3364 if (msm_host->mmc->card)
3365 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3366 sizeof(struct mmc_card));
3367 memcpy(&cached_data->copy_host, host,
3368 sizeof(struct sdhci_host));
3369}
3370
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303371void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3372{
3373 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3374 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303375 const struct sdhci_msm_offset *msm_host_offset =
3376 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303377 int tbsel, tbsel2;
3378 int i, index = 0;
3379 u32 test_bus_val = 0;
3380 u32 debug_reg[MAX_TEST_BUS] = {0};
3381
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303382 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303383 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003384 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303385 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003386
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303387 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3388 sdhci_msm_readl_relaxed(host,
3389 msm_host_offset->CORE_MCI_DATA_CNT),
3390 sdhci_msm_readl_relaxed(host,
3391 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303392 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303393 sdhci_msm_readl_relaxed(host,
3394 msm_host_offset->CORE_MCI_DATA_CNT),
3395 sdhci_msm_readl_relaxed(host,
3396 msm_host_offset->CORE_MCI_FIFO_CNT),
3397 sdhci_msm_readl_relaxed(host,
3398 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303399 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303400 readl_relaxed(host->ioaddr +
3401 msm_host_offset->CORE_DLL_CONFIG),
3402 readl_relaxed(host->ioaddr +
3403 msm_host_offset->CORE_DLL_STATUS),
3404 sdhci_msm_readl_relaxed(host,
3405 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303406 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303407 readl_relaxed(host->ioaddr +
3408 msm_host_offset->CORE_VENDOR_SPEC),
3409 readl_relaxed(host->ioaddr +
3410 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3411 readl_relaxed(host->ioaddr +
3412 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303413 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303414 readl_relaxed(host->ioaddr +
3415 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303416
3417 /*
3418 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3419 * of CORE_TESTBUS_CONFIG register.
3420 *
3421 * To select test bus 0 to 7 use tbsel and to select any test bus
3422 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
3423 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3424 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3425 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003426 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303427 for (tbsel = 0; tbsel < 8; tbsel++) {
3428 if (index >= MAX_TEST_BUS)
3429 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303430 test_bus_val =
3431 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3432 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3433 sdhci_msm_writel_relaxed(test_bus_val, host,
3434 msm_host_offset->CORE_TESTBUS_CONFIG);
3435 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3436 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303437 }
3438 }
3439 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3440 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3441 i, i + 3, debug_reg[i], debug_reg[i+1],
3442 debug_reg[i+2], debug_reg[i+3]);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003443}
3444
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303445/*
3446 * sdhci_msm_enhanced_strobe_mask :-
3447 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3448 * SW should write 3 to
3449 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3450 * The default reset value of this register is 2.
3451 */
3452static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3453{
3454 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3455 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303456 const struct sdhci_msm_offset *msm_host_offset =
3457 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303458
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303459 if (!msm_host->enhanced_strobe ||
3460 !mmc_card_strobe(msm_host->mmc->card)) {
3461 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303462 mmc_hostname(host->mmc));
3463 return;
3464 }
3465
3466 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303467 writel_relaxed((readl_relaxed(host->ioaddr +
3468 msm_host_offset->CORE_VENDOR_SPEC3)
3469 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3470 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303471 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303472 writel_relaxed((readl_relaxed(host->ioaddr +
3473 msm_host_offset->CORE_VENDOR_SPEC3)
3474 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3475 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303476 }
3477}
3478
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003479static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3480{
3481 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3482 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303483 const struct sdhci_msm_offset *msm_host_offset =
3484 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003485
3486 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303487 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3488 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003489 } else {
3490 u32 value;
3491
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303492 value = sdhci_msm_readl_relaxed(host,
3493 msm_host_offset->CORE_TESTBUS_CONFIG);
3494 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3495 sdhci_msm_writel_relaxed(value, host,
3496 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003497 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303498}
3499
Pavan Anamula691dd592015-08-25 16:11:20 +05303500void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3501{
3502 u32 vendor_func2;
3503 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303504 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3505 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3506 const struct sdhci_msm_offset *msm_host_offset =
3507 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303508
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303509 vendor_func2 = readl_relaxed(host->ioaddr +
3510 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303511
3512 if (enable) {
3513 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303514 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303515 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303516 while (readl_relaxed(host->ioaddr +
3517 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303518 if (timeout == 0) {
3519 pr_info("%s: Applying wait idle disable workaround\n",
3520 mmc_hostname(host->mmc));
3521 /*
3522 * Apply the reset workaround to not wait for
3523 * pending data transfers on AXI before
3524 * resetting the controller. This could be
3525 * risky if the transfers were stuck on the
3526 * AXI bus.
3527 */
3528 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303529 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303530 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303531 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3532 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303533 host->reset_wa_t = ktime_get();
3534 return;
3535 }
3536 timeout--;
3537 udelay(10);
3538 }
3539 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3540 mmc_hostname(host->mmc));
3541 } else {
3542 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303543 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303544 }
3545}
3546
Gilad Broner44445992015-09-29 16:05:39 +03003547static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3548{
3549 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303550 container_of(work, struct sdhci_msm_pm_qos_irq,
3551 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003552
3553 if (atomic_read(&pm_qos_irq->counter))
3554 return;
3555
3556 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3557 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3558}
3559
3560void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3561{
3562 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3563 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3564 struct sdhci_msm_pm_qos_latency *latency =
3565 &msm_host->pdata->pm_qos_data.irq_latency;
3566 int counter;
3567
3568 if (!msm_host->pm_qos_irq.enabled)
3569 return;
3570
3571 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3572 /* Make sure to update the voting in case power policy has changed */
3573 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3574 && counter > 1)
3575 return;
3576
Asutosh Das36c2e922015-12-01 12:19:58 +05303577 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003578 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3579 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3580 msm_host->pm_qos_irq.latency);
3581}
3582
3583void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3584{
3585 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3586 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3587 int counter;
3588
3589 if (!msm_host->pm_qos_irq.enabled)
3590 return;
3591
Subhash Jadavani4d813902015-10-15 12:16:43 -07003592 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3593 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3594 } else {
3595 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3596 return;
Gilad Broner44445992015-09-29 16:05:39 +03003597 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003598
Gilad Broner44445992015-09-29 16:05:39 +03003599 if (counter)
3600 return;
3601
3602 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303603 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3604 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003605 return;
3606 }
3607
3608 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3609 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3610 msm_host->pm_qos_irq.latency);
3611}
3612
Gilad Broner68c54562015-09-20 11:59:46 +03003613static ssize_t
3614sdhci_msm_pm_qos_irq_show(struct device *dev,
3615 struct device_attribute *attr, char *buf)
3616{
3617 struct sdhci_host *host = dev_get_drvdata(dev);
3618 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3619 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3620 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3621
3622 return snprintf(buf, PAGE_SIZE,
3623 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3624 irq->enabled, atomic_read(&irq->counter), irq->latency);
3625}
3626
3627static ssize_t
3628sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3629 struct device_attribute *attr, char *buf)
3630{
3631 struct sdhci_host *host = dev_get_drvdata(dev);
3632 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3633 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3634
3635 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3636}
3637
3638static ssize_t
3639sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3640 struct device_attribute *attr, const char *buf, size_t count)
3641{
3642 struct sdhci_host *host = dev_get_drvdata(dev);
3643 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3644 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3645 uint32_t value;
3646 bool enable;
3647 int ret;
3648
3649 ret = kstrtou32(buf, 0, &value);
3650 if (ret)
3651 goto out;
3652 enable = !!value;
3653
3654 if (enable == msm_host->pm_qos_irq.enabled)
3655 goto out;
3656
3657 msm_host->pm_qos_irq.enabled = enable;
3658 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303659 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003660 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3661 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3662 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3663 msm_host->pm_qos_irq.latency);
3664 }
3665
3666out:
3667 return count;
3668}
3669
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003670#ifdef CONFIG_SMP
3671static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3672 struct sdhci_host *host)
3673{
3674 msm_host->pm_qos_irq.req.irq = host->irq;
3675}
3676#else
3677static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3678 struct sdhci_host *host) { }
3679#endif
3680
Gilad Broner44445992015-09-29 16:05:39 +03003681void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3682{
3683 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3684 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3685 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003686 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003687
3688 if (!msm_host->pdata->pm_qos_data.irq_valid)
3689 return;
3690
3691 /* Initialize only once as this gets called per partition */
3692 if (msm_host->pm_qos_irq.enabled)
3693 return;
3694
3695 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3696 msm_host->pm_qos_irq.req.type =
3697 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003698 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3699 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3700 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003701 else
3702 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3703 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3704
Asutosh Das36c2e922015-12-01 12:19:58 +05303705 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003706 sdhci_msm_pm_qos_irq_unvote_work);
3707 /* For initialization phase, set the performance latency */
3708 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3709 msm_host->pm_qos_irq.latency =
3710 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3711 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3712 msm_host->pm_qos_irq.latency);
3713 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003714
3715 /* sysfs */
3716 msm_host->pm_qos_irq.enable_attr.show =
3717 sdhci_msm_pm_qos_irq_enable_show;
3718 msm_host->pm_qos_irq.enable_attr.store =
3719 sdhci_msm_pm_qos_irq_enable_store;
3720 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3721 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3722 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3723 ret = device_create_file(&msm_host->pdev->dev,
3724 &msm_host->pm_qos_irq.enable_attr);
3725 if (ret)
3726 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3727 __func__, ret);
3728
3729 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3730 msm_host->pm_qos_irq.status_attr.store = NULL;
3731 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3732 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3733 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3734 ret = device_create_file(&msm_host->pdev->dev,
3735 &msm_host->pm_qos_irq.status_attr);
3736 if (ret)
3737 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3738 __func__, ret);
3739}
3740
3741static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3742 struct device_attribute *attr, char *buf)
3743{
3744 struct sdhci_host *host = dev_get_drvdata(dev);
3745 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3746 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3747 struct sdhci_msm_pm_qos_group *group;
3748 int i;
3749 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3750 int offset = 0;
3751
3752 for (i = 0; i < nr_groups; i++) {
3753 group = &msm_host->pm_qos[i];
3754 offset += snprintf(&buf[offset], PAGE_SIZE,
3755 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3756 i, group->req.cpus_affine.bits[0],
3757 msm_host->pm_qos_group_enable,
3758 atomic_read(&group->counter),
3759 group->latency);
3760 }
3761
3762 return offset;
3763}
3764
3765static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3766 struct device_attribute *attr, char *buf)
3767{
3768 struct sdhci_host *host = dev_get_drvdata(dev);
3769 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3770 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3771
3772 return snprintf(buf, PAGE_SIZE, "%s\n",
3773 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3774}
3775
3776static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3777 struct device_attribute *attr, const char *buf, size_t count)
3778{
3779 struct sdhci_host *host = dev_get_drvdata(dev);
3780 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3781 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3782 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3783 uint32_t value;
3784 bool enable;
3785 int ret;
3786 int i;
3787
3788 ret = kstrtou32(buf, 0, &value);
3789 if (ret)
3790 goto out;
3791 enable = !!value;
3792
3793 if (enable == msm_host->pm_qos_group_enable)
3794 goto out;
3795
3796 msm_host->pm_qos_group_enable = enable;
3797 if (!enable) {
3798 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303799 cancel_delayed_work_sync(
3800 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003801 atomic_set(&msm_host->pm_qos[i].counter, 0);
3802 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3803 pm_qos_update_request(&msm_host->pm_qos[i].req,
3804 msm_host->pm_qos[i].latency);
3805 }
3806 }
3807
3808out:
3809 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003810}
3811
3812static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3813{
3814 int i;
3815 struct sdhci_msm_cpu_group_map *map =
3816 &msm_host->pdata->pm_qos_data.cpu_group_map;
3817
3818 if (cpu < 0)
3819 goto not_found;
3820
3821 for (i = 0; i < map->nr_groups; i++)
3822 if (cpumask_test_cpu(cpu, &map->mask[i]))
3823 return i;
3824
3825not_found:
3826 return -EINVAL;
3827}
3828
3829void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3830 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3831{
3832 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3833 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3834 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3835 struct sdhci_msm_pm_qos_group *pm_qos_group;
3836 int counter;
3837
3838 if (!msm_host->pm_qos_group_enable || group < 0)
3839 return;
3840
3841 pm_qos_group = &msm_host->pm_qos[group];
3842 counter = atomic_inc_return(&pm_qos_group->counter);
3843
3844 /* Make sure to update the voting in case power policy has changed */
3845 if (pm_qos_group->latency == latency->latency[host->power_policy]
3846 && counter > 1)
3847 return;
3848
Asutosh Das36c2e922015-12-01 12:19:58 +05303849 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003850
3851 pm_qos_group->latency = latency->latency[host->power_policy];
3852 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3853}
3854
3855static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3856{
3857 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303858 container_of(work, struct sdhci_msm_pm_qos_group,
3859 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003860
3861 if (atomic_read(&group->counter))
3862 return;
3863
3864 group->latency = PM_QOS_DEFAULT_VALUE;
3865 pm_qos_update_request(&group->req, group->latency);
3866}
3867
Gilad Broner07d92eb2015-09-29 16:57:21 +03003868bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003869{
3870 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3871 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3872 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3873
3874 if (!msm_host->pm_qos_group_enable || group < 0 ||
3875 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003876 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003877
3878 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303879 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3880 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003881 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003882 }
3883
3884 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3885 pm_qos_update_request(&msm_host->pm_qos[group].req,
3886 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003887 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003888}
3889
/*
 * sdhci_msm_pm_qos_cpu_init - one-time setup of per-CPU-group PM QoS.
 * @host:    SDHCI host instance
 * @latency: per-group latency table from platform data (used here only
 *           for the informational print)
 *
 * Allocates one sdhci_msm_pm_qos_group per configured CPU group,
 * registers each group's PM QoS request at the default latency, and
 * creates the pm_qos_cpu_groups_status/pm_qos_cpu_groups_enable sysfs
 * attributes.  Guarded against repeat calls; silently returns if the
 * group array allocation fails (feature stays disabled).
 */
void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
	struct sdhci_msm_pm_qos_group *group;
	int i;
	int ret;

	if (msm_host->pm_qos_group_enable)
		return;

	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
			GFP_KERNEL);
	if (!msm_host->pm_qos)
		return;

	for (i = 0; i < nr_groups; i++) {
		group = &msm_host->pm_qos[i];
		INIT_DELAYED_WORK(&group->unvote_work,
			sdhci_msm_pm_qos_cpu_unvote_work);
		atomic_set(&group->counter, 0);
		group->req.type = PM_QOS_REQ_AFFINE_CORES;
		cpumask_copy(&group->req.cpus_affine,
			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
		/* We set default latency here for all pm_qos cpu groups. */
		group->latency = PM_QOS_DEFAULT_VALUE;
		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
			group->latency);
		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
			__func__, i,
			group->req.cpus_affine.bits[0],
			group->latency,
			&latency[i].latency[SDHCI_PERFORMANCE_MODE]);
	}
	/* No previous request CPU yet; sdhci_msm_pre_req() tracks this. */
	msm_host->pm_qos_prev_cpu = -1;
	msm_host->pm_qos_group_enable = true;

	/* sysfs */
	msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
	msm_host->pm_qos_group_status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
	msm_host->pm_qos_group_status_attr.attr.name =
			"pm_qos_cpu_groups_status";
	msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
	/* Attribute creation failures are logged but not fatal. */
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_group_status_attr);
	if (ret)
		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
			__func__, ret);
	msm_host->pm_qos_group_enable_attr.show =
		sdhci_msm_pm_qos_group_enable_show;
	msm_host->pm_qos_group_enable_attr.store =
		sdhci_msm_pm_qos_group_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
	msm_host->pm_qos_group_enable_attr.attr.name =
			"pm_qos_cpu_groups_enable";
	msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_group_enable_attr);
	if (ret)
		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
			__func__, ret);
}
3955
Gilad Broner07d92eb2015-09-29 16:57:21 +03003956static void sdhci_msm_pre_req(struct sdhci_host *host,
3957 struct mmc_request *mmc_req)
3958{
3959 int cpu;
3960 int group;
3961 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3962 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3963 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3964 msm_host->pm_qos_prev_cpu);
3965
3966 sdhci_msm_pm_qos_irq_vote(host);
3967
3968 cpu = get_cpu();
3969 put_cpu();
3970 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3971 if (group < 0)
3972 return;
3973
3974 if (group != prev_group && prev_group >= 0) {
3975 sdhci_msm_pm_qos_cpu_unvote(host,
3976 msm_host->pm_qos_prev_cpu, false);
3977 prev_group = -1; /* make sure to vote for new group */
3978 }
3979
3980 if (prev_group < 0) {
3981 sdhci_msm_pm_qos_cpu_vote(host,
3982 msm_host->pdata->pm_qos_data.latency, cpu);
3983 msm_host->pm_qos_prev_cpu = cpu;
3984 }
3985}
3986
3987static void sdhci_msm_post_req(struct sdhci_host *host,
3988 struct mmc_request *mmc_req)
3989{
3990 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3991 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3992
3993 sdhci_msm_pm_qos_irq_unvote(host, false);
3994
3995 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3996 msm_host->pm_qos_prev_cpu = -1;
3997}
3998
3999static void sdhci_msm_init(struct sdhci_host *host)
4000{
4001 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4002 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4003
4004 sdhci_msm_pm_qos_irq_init(host);
4005
4006 if (msm_host->pdata->pm_qos_data.legacy_valid)
4007 sdhci_msm_pm_qos_cpu_init(host,
4008 msm_host->pdata->pm_qos_data.latency);
4009}
4010
Sahitya Tummala9150a942014-10-31 15:33:04 +05304011static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4012{
4013 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4014 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4015 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4016 u32 max_curr = 0;
4017
4018 if (curr_slot && curr_slot->vdd_data)
4019 max_curr = curr_slot->vdd_data->hpm_uA;
4020
4021 return max_curr;
4022}
4023
/*
 * SDHCI host operations vector for the MSM controller. Mixes MSM-specific
 * callbacks (tuning, clock control, power-IRQ status, vendor register dump,
 * PM QoS hooks) with generic sdhci_* helpers for bus width and reset.
 */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.reset_workaround = sdhci_msm_reset_workaround,
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
	.get_current_limit = sdhci_msm_get_current_limit,
};
4047
/*
 * sdhci_set_default_hw_caps() - derive capabilities and quirks from the SDCC
 * core revision.
 *
 * Reads CORE_MCI_VERSION for the core's major/minor revision, then:
 *  - patches the advertised SDHCI capability bits (3.0V / 1.8V / 8-bit) that
 *    newer cores no longer report on their own,
 *  - applies revision-specific workarounds and feature flags (one-MID mode,
 *    CDC vs CM DLL selection, updated DLL reset, 14lpp DLL, RCLK delay fix,
 *    enhanced strobe, ICE HCI support),
 *  - writes the final capability mask to the vendor-spec capabilities
 *    register and caches it in msm_host->caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;
	const struct sdhci_msm_offset *msm_host_offset =
			msm_host->offset;

	version = sdhci_msm_readl_relaxed(host,
		msm_host_offset->CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	/* Start from whatever the controller itself advertises. */
	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		writel_relaxed((readl_relaxed(host->ioaddr +
			SDHCI_CAPABILITIES) | caps), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;

	/*
	 * NOTE(review): per the field name, minor 0x6b+ cores appear to expose
	 * the Inline Crypto Engine via HCI — confirm against the core HPG.
	 */
	if ((major == 1) && (minor >= 0x6b))
		msm_host->ice_hci_support = true;
}
4151
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Attach the command-queue (CMDQ) host to this SDHCI host, unless disabled
 * via the "nocmdq" command-line option. On success, advertise command
 * queuing in the MMC capabilities; on failure, run without CMDQ.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (nocmdq) {
		dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
		return;
	}

	host->cq_host = cmdq_pltfm_init(pdev);
	if (!IS_ERR(host->cq_host)) {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
	} else {
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
		host->cq_host = NULL;
	}
}
#else
/* CMDQ support not compiled in: nothing to set up. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{

}
#endif
4180
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004181static bool sdhci_msm_is_bootdevice(struct device *dev)
4182{
4183 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4184 strlen(saved_command_line))) {
4185 char search_string[50];
4186
4187 snprintf(search_string, ARRAY_SIZE(search_string),
4188 "androidboot.bootdevice=%s", dev_name(dev));
4189 if (strnstr(saved_command_line, search_string,
4190 strlen(saved_command_line)))
4191 return true;
4192 else
4193 return false;
4194 }
4195
4196 /*
4197 * "androidboot.bootdevice=" argument is not present then
4198 * return true as we don't know the boot device anyways.
4199 */
4200 return true;
4201}
4202
Asutosh Das0ef24812012-12-18 16:14:02 +05304203static int sdhci_msm_probe(struct platform_device *pdev)
4204{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304205 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304206 struct sdhci_host *host;
4207 struct sdhci_pltfm_host *pltfm_host;
4208 struct sdhci_msm_host *msm_host;
4209 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004210 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004211 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004212 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304213 struct resource *tlmm_memres = NULL;
4214 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304215 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304216
4217 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4218 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4219 GFP_KERNEL);
4220 if (!msm_host) {
4221 ret = -ENOMEM;
4222 goto out;
4223 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304224
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304225 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4226 msm_host->mci_removed = true;
4227 msm_host->offset = &sdhci_msm_offset_mci_removed;
4228 } else {
4229 msm_host->mci_removed = false;
4230 msm_host->offset = &sdhci_msm_offset_mci_present;
4231 }
4232 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304233 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4234 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4235 if (IS_ERR(host)) {
4236 ret = PTR_ERR(host);
4237 goto out;
4238 }
4239
4240 pltfm_host = sdhci_priv(host);
4241 pltfm_host->priv = msm_host;
4242 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304243 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304244
4245 /* Extract platform data */
4246 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004247 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304248 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004249 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4250 ret);
4251 goto pltfm_free;
4252 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004253
4254 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004255 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4256 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004257 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004258 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004259
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004260 if (disable_slots & (1 << (ret - 1))) {
4261 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4262 ret);
4263 ret = -ENODEV;
4264 goto pltfm_free;
4265 }
4266
Sayali Lokhande5f768322016-04-11 18:36:53 +05304267 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004268 sdhci_slot[ret-1] = msm_host;
4269
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004270 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4271 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304272 if (!msm_host->pdata) {
4273 dev_err(&pdev->dev, "DT parsing error\n");
4274 goto pltfm_free;
4275 }
4276 } else {
4277 dev_err(&pdev->dev, "No device tree node\n");
4278 goto pltfm_free;
4279 }
4280
4281 /* Setup Clocks */
4282
4283 /* Setup SDCC bus voter clock. */
4284 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4285 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4286 /* Vote for max. clk rate for max. performance */
4287 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4288 if (ret)
4289 goto pltfm_free;
4290 ret = clk_prepare_enable(msm_host->bus_clk);
4291 if (ret)
4292 goto pltfm_free;
4293 }
4294
4295 /* Setup main peripheral bus clock */
4296 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4297 if (!IS_ERR(msm_host->pclk)) {
4298 ret = clk_prepare_enable(msm_host->pclk);
4299 if (ret)
4300 goto bus_clk_disable;
4301 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304302 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304303
4304 /* Setup SDC MMC clock */
4305 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4306 if (IS_ERR(msm_host->clk)) {
4307 ret = PTR_ERR(msm_host->clk);
4308 goto pclk_disable;
4309 }
4310
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304311 /* Set to the minimum supported clock frequency */
4312 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4313 if (ret) {
4314 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304315 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304316 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304317 ret = clk_prepare_enable(msm_host->clk);
4318 if (ret)
4319 goto pclk_disable;
4320
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304321 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304322 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304323
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004324 /* Setup CDC calibration fixed feedback clock */
4325 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4326 if (!IS_ERR(msm_host->ff_clk)) {
4327 ret = clk_prepare_enable(msm_host->ff_clk);
4328 if (ret)
4329 goto clk_disable;
4330 }
4331
4332 /* Setup CDC calibration sleep clock */
4333 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4334 if (!IS_ERR(msm_host->sleep_clk)) {
4335 ret = clk_prepare_enable(msm_host->sleep_clk);
4336 if (ret)
4337 goto ff_clk_disable;
4338 }
4339
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004340 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4341
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304342 ret = sdhci_msm_bus_register(msm_host, pdev);
4343 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004344 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304345
4346 if (msm_host->msm_bus_vote.client_handle)
4347 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4348 sdhci_msm_bus_work);
4349 sdhci_msm_bus_voting(host, 1);
4350
Asutosh Das0ef24812012-12-18 16:14:02 +05304351 /* Setup regulators */
4352 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4353 if (ret) {
4354 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304355 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304356 }
4357
4358 /* Reset the core and Enable SDHC mode */
4359 core_memres = platform_get_resource_byname(pdev,
4360 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304361 if (!msm_host->mci_removed) {
4362 if (!core_memres) {
4363 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4364 goto vreg_deinit;
4365 }
4366 msm_host->core_mem = devm_ioremap(&pdev->dev,
4367 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304368
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304369 if (!msm_host->core_mem) {
4370 dev_err(&pdev->dev, "Failed to remap registers\n");
4371 ret = -ENOMEM;
4372 goto vreg_deinit;
4373 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304374 }
4375
Sahitya Tummala079ed852015-10-29 20:18:45 +05304376 tlmm_memres = platform_get_resource_byname(pdev,
4377 IORESOURCE_MEM, "tlmm_mem");
4378 if (tlmm_memres) {
4379 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4380 resource_size(tlmm_memres));
4381
4382 if (!tlmm_mem) {
4383 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4384 ret = -ENOMEM;
4385 goto vreg_deinit;
4386 }
4387 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4388 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4389 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4390 }
4391
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304392 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004393 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304394 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004395 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304396 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304397
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304398 if (!msm_host->mci_removed) {
4399 /* Set HC_MODE_EN bit in HC_MODE register */
4400 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304401
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304402 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4403 writel_relaxed(readl_relaxed(msm_host->core_mem +
4404 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4405 msm_host->core_mem + CORE_HC_MODE);
4406 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304407 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004408
4409 /*
4410 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
4411 * be used as required later on.
4412 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304413 writel_relaxed((readl_relaxed(host->ioaddr +
4414 msm_host_offset->CORE_VENDOR_SPEC) |
4415 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4416 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304417 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304418 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4419 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4420 * interrupt in GIC (by registering the interrupt handler), we need to
4421 * ensure that any pending power irq interrupt status is acknowledged
4422 * otherwise power irq interrupt handler would be fired prematurely.
4423 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304424 irq_status = sdhci_msm_readl_relaxed(host,
4425 msm_host_offset->CORE_PWRCTL_STATUS);
4426 sdhci_msm_writel_relaxed(irq_status, host,
4427 msm_host_offset->CORE_PWRCTL_CLEAR);
4428 irq_ctl = sdhci_msm_readl_relaxed(host,
4429 msm_host_offset->CORE_PWRCTL_CTL);
4430
Subhash Jadavani28137342013-05-14 17:46:43 +05304431 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4432 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4433 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4434 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304435 sdhci_msm_writel_relaxed(irq_ctl, host,
4436 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004437
Subhash Jadavani28137342013-05-14 17:46:43 +05304438 /*
4439 * Ensure that above writes are propogated before interrupt enablement
4440 * in GIC.
4441 */
4442 mb();
4443
4444 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304445 * Following are the deviations from SDHC spec v3.0 -
4446 * 1. Card detection is handled using separate GPIO.
4447 * 2. Bus power control is handled by interacting with PMIC.
4448 */
4449 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4450 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304451 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004452 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304453 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304454 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304455 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304456 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304457 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304458 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304459
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304460 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4461 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4462
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004463 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004464 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4465 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4466 SDHCI_VENDOR_VER_SHIFT));
4467 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4468 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4469 /*
4470 * Add 40us delay in interrupt handler when
4471 * operating at initialization frequency(400KHz).
4472 */
4473 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4474 /*
4475 * Set Software Reset for DAT line in Software
4476 * Reset Register (Bit 2).
4477 */
4478 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4479 }
4480
Asutosh Das214b9662013-06-13 14:27:42 +05304481 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4482
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004483 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004484 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4485 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304486 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004487 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304488 goto vreg_deinit;
4489 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004490 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304491 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004492 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304493 if (ret) {
4494 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004495 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304496 goto vreg_deinit;
4497 }
4498
4499 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304500 sdhci_msm_writel_relaxed(INT_MASK, host,
4501 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304502
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304503#ifdef CONFIG_MMC_CLKGATE
4504 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4505 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4506#endif
4507
Asutosh Das0ef24812012-12-18 16:14:02 +05304508 /* Set host capabilities */
4509 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4510 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004511 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304512 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304513 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004514 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004515 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004516 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304517 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004518 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004519 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304520 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304521
4522 if (msm_host->pdata->nonremovable)
4523 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4524
Guoping Yuf7c91332014-08-20 16:56:18 +08004525 if (msm_host->pdata->nonhotplug)
4526 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4527
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07004528 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
4529
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304530 init_completion(&msm_host->pwr_irq_completion);
4531
Sahitya Tummala581df132013-03-12 14:57:46 +05304532 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304533 /*
4534 * Set up the card detect GPIO in active configuration before
4535 * configuring it as an IRQ. Otherwise, it can be in some
4536 * weird/inconsistent state resulting in flood of interrupts.
4537 */
4538 sdhci_msm_setup_pins(msm_host->pdata, true);
4539
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304540 /*
4541 * This delay is needed for stabilizing the card detect GPIO
4542 * line after changing the pull configs.
4543 */
4544 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304545 ret = mmc_gpio_request_cd(msm_host->mmc,
4546 msm_host->pdata->status_gpio, 0);
4547 if (ret) {
4548 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4549 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304550 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304551 }
4552 }
4553
Krishna Konda7feab352013-09-17 23:55:40 -07004554 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4555 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4556 host->dma_mask = DMA_BIT_MASK(64);
4557 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304558 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004559 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304560 host->dma_mask = DMA_BIT_MASK(32);
4561 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304562 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304563 } else {
4564 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4565 }
4566
Ritesh Harjani42876f42015-11-17 17:46:51 +05304567 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4568 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304569 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304570 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4571 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304572 msm_host->is_sdiowakeup_enabled = true;
4573 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4574 sdhci_msm_sdiowakeup_irq,
4575 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4576 "sdhci-msm sdiowakeup", host);
4577 if (ret) {
4578 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4579 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4580 msm_host->pdata->sdiowakeup_irq = -1;
4581 msm_host->is_sdiowakeup_enabled = false;
4582 goto vreg_deinit;
4583 } else {
4584 spin_lock_irqsave(&host->lock, flags);
4585 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304586 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304587 spin_unlock_irqrestore(&host->lock, flags);
4588 }
4589 }
4590
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004591 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304592 ret = sdhci_add_host(host);
4593 if (ret) {
4594 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304595 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304596 }
4597
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05304598 msm_host->pltfm_init_done = true;
4599
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004600 pm_runtime_set_active(&pdev->dev);
4601 pm_runtime_enable(&pdev->dev);
4602 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4603 pm_runtime_use_autosuspend(&pdev->dev);
4604
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304605 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4606 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4607 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4608 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4609 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4610 ret = device_create_file(&pdev->dev,
4611 &msm_host->msm_bus_vote.max_bus_bw);
4612 if (ret)
4613 goto remove_host;
4614
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304615 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4616 msm_host->polling.show = show_polling;
4617 msm_host->polling.store = store_polling;
4618 sysfs_attr_init(&msm_host->polling.attr);
4619 msm_host->polling.attr.name = "polling";
4620 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4621 ret = device_create_file(&pdev->dev, &msm_host->polling);
4622 if (ret)
4623 goto remove_max_bus_bw_file;
4624 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304625
4626 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4627 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4628 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4629 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4630 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4631 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4632 if (ret) {
4633 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4634 mmc_hostname(host->mmc), __func__, ret);
4635 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4636 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304637 /* Successful initialization */
4638 goto out;
4639
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304640remove_max_bus_bw_file:
4641 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304642remove_host:
4643 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004644 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304645 sdhci_remove_host(host, dead);
4646vreg_deinit:
4647 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304648bus_unregister:
4649 if (msm_host->msm_bus_vote.client_handle)
4650 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4651 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004652sleep_clk_disable:
4653 if (!IS_ERR(msm_host->sleep_clk))
4654 clk_disable_unprepare(msm_host->sleep_clk);
4655ff_clk_disable:
4656 if (!IS_ERR(msm_host->ff_clk))
4657 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304658clk_disable:
4659 if (!IS_ERR(msm_host->clk))
4660 clk_disable_unprepare(msm_host->clk);
4661pclk_disable:
4662 if (!IS_ERR(msm_host->pclk))
4663 clk_disable_unprepare(msm_host->pclk);
4664bus_clk_disable:
4665 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4666 clk_disable_unprepare(msm_host->bus_clk);
4667pltfm_free:
4668 sdhci_pltfm_free(pdev);
4669out:
4670 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4671 return ret;
4672}
4673
4674static int sdhci_msm_remove(struct platform_device *pdev)
4675{
4676 struct sdhci_host *host = platform_get_drvdata(pdev);
4677 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4678 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4679 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4680 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4681 0xffffffff);
4682
4683 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304684 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4685 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304686 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004687 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304688 sdhci_remove_host(host, dead);
4689 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304690
Asutosh Das0ef24812012-12-18 16:14:02 +05304691 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304692
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304693 sdhci_msm_setup_pins(pdata, true);
4694 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304695
4696 if (msm_host->msm_bus_vote.client_handle) {
4697 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4698 sdhci_msm_bus_unregister(msm_host);
4699 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304700 return 0;
4701}
4702
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004703#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304704static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4705{
4706 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4707 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4708 unsigned long flags;
4709 int ret = 0;
4710
4711 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4712 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4713 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304714 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304715 return 1;
4716 }
4717
4718 spin_lock_irqsave(&host->lock, flags);
4719 if (enable) {
4720 /* configure DAT1 gpio if applicable */
4721 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304722 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304723 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4724 if (!ret)
4725 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4726 goto out;
4727 } else {
4728 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4729 mmc_hostname(host->mmc), enable);
4730 }
4731 } else {
4732 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4733 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4734 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304735 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304736 } else {
4737 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
4738 mmc_hostname(host->mmc), enable);
4739
4740 }
4741 }
4742out:
4743 if (ret)
4744 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4745 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4746 ret, msm_host->pdata->sdiowakeup_irq);
4747 spin_unlock_irqrestore(&host->lock, flags);
4748 return ret;
4749}
4750
4751
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004752static int sdhci_msm_runtime_suspend(struct device *dev)
4753{
4754 struct sdhci_host *host = dev_get_drvdata(dev);
4755 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4756 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004757 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004758
Ritesh Harjani42876f42015-11-17 17:46:51 +05304759 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4760 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304761
Ritesh Harjani42876f42015-11-17 17:46:51 +05304762 sdhci_cfg_irq(host, false, true);
4763
4764defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004765 disable_irq(msm_host->pwr_irq);
4766
4767 /*
4768 * Remove the vote immediately only if clocks are off in which
4769 * case we might have queued work to remove vote but it may not
4770 * be completed before runtime suspend or system suspend.
4771 */
4772 if (!atomic_read(&msm_host->clks_on)) {
4773 if (msm_host->msm_bus_vote.client_handle)
4774 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4775 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004776 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4777 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004778
4779 return 0;
4780}
4781
4782static int sdhci_msm_runtime_resume(struct device *dev)
4783{
4784 struct sdhci_host *host = dev_get_drvdata(dev);
4785 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4786 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004787 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004788
Ritesh Harjani42876f42015-11-17 17:46:51 +05304789 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4790 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304791
Ritesh Harjani42876f42015-11-17 17:46:51 +05304792 sdhci_cfg_irq(host, true, true);
4793
4794defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004795 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004796
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004797 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4798 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004799 return 0;
4800}
4801
4802static int sdhci_msm_suspend(struct device *dev)
4803{
4804 struct sdhci_host *host = dev_get_drvdata(dev);
4805 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4806 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004807 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304808 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004809 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004810
4811 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4812 (msm_host->mmc->slot.cd_irq >= 0))
4813 disable_irq(msm_host->mmc->slot.cd_irq);
4814
4815 if (pm_runtime_suspended(dev)) {
4816 pr_debug("%s: %s: already runtime suspended\n",
4817 mmc_hostname(host->mmc), __func__);
4818 goto out;
4819 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004820 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004821out:
Sayali Lokhandeb30295162016-11-18 16:05:50 +05304822 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304823 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4824 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4825 if (sdio_cfg)
4826 sdhci_cfg_irq(host, false, true);
4827 }
4828
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004829 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4830 ktime_to_us(ktime_sub(ktime_get(), start)));
4831 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004832}
4833
4834static int sdhci_msm_resume(struct device *dev)
4835{
4836 struct sdhci_host *host = dev_get_drvdata(dev);
4837 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4838 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4839 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304840 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004841 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004842
4843 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4844 (msm_host->mmc->slot.cd_irq >= 0))
4845 enable_irq(msm_host->mmc->slot.cd_irq);
4846
4847 if (pm_runtime_suspended(dev)) {
4848 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4849 mmc_hostname(host->mmc), __func__);
4850 goto out;
4851 }
4852
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004853 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004854out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304855 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4856 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4857 if (sdio_cfg)
4858 sdhci_cfg_irq(host, true, true);
4859 }
4860
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004861 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4862 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004863 return ret;
4864}
4865
Ritesh Harjani42876f42015-11-17 17:46:51 +05304866static int sdhci_msm_suspend_noirq(struct device *dev)
4867{
4868 struct sdhci_host *host = dev_get_drvdata(dev);
4869 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4870 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4871 int ret = 0;
4872
4873 /*
4874 * ksdioirqd may be running, hence retry
4875 * suspend in case the clocks are ON
4876 */
4877 if (atomic_read(&msm_host->clks_on)) {
4878 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4879 mmc_hostname(host->mmc), __func__);
4880 ret = -EAGAIN;
4881 }
4882
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304883 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4884 if (msm_host->sdio_pending_processing)
4885 ret = -EBUSY;
4886
Ritesh Harjani42876f42015-11-17 17:46:51 +05304887 return ret;
4888}
4889
/*
 * System-sleep and runtime-PM callbacks.  There is no helper macro for
 * the noirq phase, so .suspend_noirq is assigned explicitly.
 */
static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
	.suspend_noirq = sdhci_msm_suspend_noirq,
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
/* No PM support compiled in: driver registers with no pm ops */
#define SDHCI_MSM_PMOPS NULL
#endif
/* Devicetree compatibles bound to this driver */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{.compatible = "qcom,sdhci-msm-v5"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4908
/* Platform driver glue; probe/remove are defined earlier in this file */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		/* NULL when CONFIG_PM is not set (see SDHCI_MSM_PMOPS) */
		.pm = SDHCI_MSM_PMOPS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");