blob: bcf33b8fa648a79aca22ba22e6c95499d24336eb [file] [log] [blame]
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +053024#include <linux/of_device.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053025#include <linux/of_gpio.h>
26#include <linux/regulator/consumer.h>
27#include <linux/types.h>
28#include <linux/input.h>
29#include <linux/platform_device.h>
30#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070031#include <linux/io.h>
32#include <linux/delay.h>
33#include <linux/scatterlist.h>
34#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053035#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053036#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053037#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053038#include <linux/pinctrl/consumer.h>
39#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053040#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020041#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020042#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053043
Sahitya Tummala56874732015-05-21 08:24:03 +053044#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070045#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053046
#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define SDHCI_VER_100		0x2B

/* Fields of the core version register */
#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

/* Power-control request bits */
#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

/* Power-control completion/status bits */
#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

/* CORE_DLL_CONFIG bits */
#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

/* CORE_DLL_STATUS bits */
#define CORE_DLL_LOCK		(1 << 7)
#define CORE_DDR_DLL_LOCK	(1 << 11)

/* CORE_VENDOR_SPEC bits */
#define CORE_CLK_PWRSAVE	(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_HC_AUTO_CMD21_EN	(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
#define CORE_HC_SELECT_IN_EN	(1 << 18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
#define HC_SW_RST_REQ		(1 << 21)
#define CORE_ONE_MID_EN		(1 << 25)

/* Vendor-specific capabilities bits */
#define CORE_8_BIT_SUPPORT	(1 << 18)
#define CORE_3_3V_SUPPORT	(1 << 24)
#define CORE_3_0V_SUPPORT	(1 << 25)
#define CORE_1_8V_SUPPORT	(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

/* CSR CDC register offsets and bits */
#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB	(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

/* Command-queue debug RAM offsets */
#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

/* CORE_DLL_CONFIG_2 bits */
#define CORE_DDR_CAL_EN			(1 << 0)
#define CORE_FLL_CYCLE_CNT		(1 << 18)
#define CORE_DLL_CLOCK_DISABLE		(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY		115
#define DDR_CONFIG_2_POR_VAL		0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
#define TCXO_FREQ		19200000

#define INVALID_TUNING_PHASE	-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES		16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100
Krishna Konda96e6b112013-10-28 15:25:03 -0700172
/*
 * Per-variant register layout for the SDCC core.  Two instances exist
 * below: one for controllers where the legacy MCI register space has
 * been removed and one for controllers that still expose it.  Fields
 * are byte offsets except where noted.
 */
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;	/* bit position, not a byte offset */
	u32 CORE_TESTBUS_ENA;		/* bit mask, not a byte offset */
	u32 CORE_TESTBUS_SEL2;		/* bit mask, not a byte offset */
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};
201
/*
 * Register offsets for controllers without the legacy MCI register
 * space; all registers are reached from host->ioaddr (see the
 * sdhci_msm_*_relaxed() accessors, which select the base on
 * msm_host->mci_removed).
 */
struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};
230
/*
 * Register offsets for older controllers that still expose the legacy
 * MCI register space; these registers are reached from
 * msm_host->core_mem (see the sdhci_msm_*_relaxed() accessors).
 */
struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};
259
260u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
261{
262 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
263 struct sdhci_msm_host *msm_host = pltfm_host->priv;
264 void __iomem *base_addr;
265
266 if (msm_host->mci_removed)
267 base_addr = host->ioaddr;
268 else
269 base_addr = msm_host->core_mem;
270
271 return readb_relaxed(base_addr + offset);
272}
273
274u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
275{
276 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
277 struct sdhci_msm_host *msm_host = pltfm_host->priv;
278 void __iomem *base_addr;
279
280 if (msm_host->mci_removed)
281 base_addr = host->ioaddr;
282 else
283 base_addr = msm_host->core_mem;
284
285 return readl_relaxed(base_addr + offset);
286}
287
288void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
289{
290 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
291 struct sdhci_msm_host *msm_host = pltfm_host->priv;
292 void __iomem *base_addr;
293
294 if (msm_host->mci_removed)
295 base_addr = host->ioaddr;
296 else
297 base_addr = msm_host->core_mem;
298
299 writeb_relaxed(val, base_addr + offset);
300}
301
302void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
303{
304 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
305 struct sdhci_msm_host *msm_host = pltfm_host->priv;
306 void __iomem *base_addr;
307
308 if (msm_host->mci_removed)
309 base_addr = host->ioaddr;
310 else
311 base_addr = msm_host->core_mem;
312
313 writel_relaxed(val, base_addr + offset);
314}
315
/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

/*
 * Tuning pattern blocks.  The 64-byte and 128-byte variants presumably
 * correspond to 4-bit and 8-bit bus widths respectively — confirm
 * against the tuning code that selects between them.
 */
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};
Asutosh Das0ef24812012-12-18 16:14:02 +0530336
/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

/* Non-zero disables probing of the corresponding slot(s). */
static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

/* Module parameter to opt out of CMDQ (command queueing) support. */
static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
346
/* Selector for the VDD-IO voltage applied by sdhci_msm_set_vdd_io_vol(). */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};
358
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700359/* MSM platform specific tuning */
360static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
361 u8 poll)
362{
363 int rc = 0;
364 u32 wait_cnt = 50;
365 u8 ck_out_en = 0;
366 struct mmc_host *mmc = host->mmc;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530367 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
368 struct sdhci_msm_host *msm_host = pltfm_host->priv;
369 const struct sdhci_msm_offset *msm_host_offset =
370 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700371
372 /* poll for CK_OUT_EN bit. max. poll time = 50us */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530373 ck_out_en = !!(readl_relaxed(host->ioaddr +
374 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700375
376 while (ck_out_en != poll) {
377 if (--wait_cnt == 0) {
378 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
379 mmc_hostname(mmc), __func__, poll);
380 rc = -ETIMEDOUT;
381 goto out;
382 }
383 udelay(1);
384
385 ck_out_en = !!(readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530386 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700387 }
388out:
389 return rc;
390}
391
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530392/*
393 * Enable CDR to track changes of DAT lines and adjust sampling
394 * point according to voltage/temperature variations
395 */
396static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
397{
398 int rc = 0;
399 u32 config;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530400 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
401 struct sdhci_msm_host *msm_host = pltfm_host->priv;
402 const struct sdhci_msm_offset *msm_host_offset =
403 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530404
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530405 config = readl_relaxed(host->ioaddr +
406 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530407 config |= CORE_CDR_EN;
408 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530409 writel_relaxed(config, host->ioaddr +
410 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530411
412 rc = msm_dll_poll_ck_out_en(host, 0);
413 if (rc)
414 goto err;
415
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530416 writel_relaxed((readl_relaxed(host->ioaddr +
417 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
418 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530419
420 rc = msm_dll_poll_ck_out_en(host, 1);
421 if (rc)
422 goto err;
423 goto out;
424err:
425 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
426out:
427 return rc;
428}
429
430static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
431 *attr, const char *buf, size_t count)
432{
433 struct sdhci_host *host = dev_get_drvdata(dev);
434 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
435 struct sdhci_msm_host *msm_host = pltfm_host->priv;
436 u32 tmp;
437 unsigned long flags;
438
439 if (!kstrtou32(buf, 0, &tmp)) {
440 spin_lock_irqsave(&host->lock, flags);
441 msm_host->en_auto_cmd21 = !!tmp;
442 spin_unlock_irqrestore(&host->lock, flags);
443 }
444 return count;
445}
446
447static ssize_t show_auto_cmd21(struct device *dev,
448 struct device_attribute *attr, char *buf)
449{
450 struct sdhci_host *host = dev_get_drvdata(dev);
451 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
452 struct sdhci_msm_host *msm_host = pltfm_host->priv;
453
454 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
455}
456
457/* MSM auto-tuning handler */
458static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
459 bool enable,
460 u32 type)
461{
462 int rc = 0;
463 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
464 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530465 const struct sdhci_msm_offset *msm_host_offset =
466 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530467 u32 val = 0;
468
469 if (!msm_host->en_auto_cmd21)
470 return 0;
471
472 if (type == MMC_SEND_TUNING_BLOCK_HS200)
473 val = CORE_HC_AUTO_CMD21_EN;
474 else
475 return 0;
476
477 if (enable) {
478 rc = msm_enable_cdr_cm_sdc4_dll(host);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530479 writel_relaxed(readl_relaxed(host->ioaddr +
480 msm_host_offset->CORE_VENDOR_SPEC) | val,
481 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530482 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530483 writel_relaxed(readl_relaxed(host->ioaddr +
484 msm_host_offset->CORE_VENDOR_SPEC) & ~val,
485 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530486 }
487 return rc;
488}
489
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700490static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
491{
492 int rc = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530493 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
494 struct sdhci_msm_host *msm_host = pltfm_host->priv;
495 const struct sdhci_msm_offset *msm_host_offset =
496 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700497 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
498 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
499 0x8};
500 unsigned long flags;
501 u32 config;
502 struct mmc_host *mmc = host->mmc;
503
504 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
505 spin_lock_irqsave(&host->lock, flags);
506
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530507 config = readl_relaxed(host->ioaddr +
508 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700509 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
510 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530511 writel_relaxed(config, host->ioaddr +
512 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700513
514 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
515 rc = msm_dll_poll_ck_out_en(host, 0);
516 if (rc)
517 goto err_out;
518
519 /*
520 * Write the selected DLL clock output phase (0 ... 15)
521 * to CDR_SELEXT bit field of DLL_CONFIG register.
522 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530523 writel_relaxed(((readl_relaxed(host->ioaddr +
524 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700525 & ~(0xF << 20))
526 | (grey_coded_phase_table[phase] << 20)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530527 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700528
529 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530530 writel_relaxed((readl_relaxed(host->ioaddr +
531 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
532 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700533
534 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
535 rc = msm_dll_poll_ck_out_en(host, 1);
536 if (rc)
537 goto err_out;
538
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530539 config = readl_relaxed(host->ioaddr +
540 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700541 config |= CORE_CDR_EN;
542 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530543 writel_relaxed(config, host->ioaddr +
544 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700545 goto out;
546
547err_out:
548 pr_err("%s: %s: Failed to set DLL phase: %d\n",
549 mmc_hostname(mmc), __func__, phase);
550out:
551 spin_unlock_irqrestore(&host->lock, flags);
552 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
553 return rc;
554}
555
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

/*
 * @phase_table: sorted list of phases (0..15) that passed tuning
 * @total_phases: number of valid entries in @phase_table (1..MAX_PHASES)
 *
 * Returns the chosen phase (0..15) or -EINVAL on bad input.  Groups
 * the passing phases into windows of consecutive values, merges the
 * first and last windows when they wrap around 15 -> 0, and picks the
 * phase at the 3/4 point of the largest window.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[row] holds one window of consecutive passing phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	/* number of phases stored in each row of ranges[] */
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split phase_table into rows of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the row (window) with the most passing phases */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase at the 3/4 position of the selected window */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
669
670static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
671{
672 u32 mclk_freq = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530673 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
674 struct sdhci_msm_host *msm_host = pltfm_host->priv;
675 const struct sdhci_msm_offset *msm_host_offset =
676 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700677
678 /* Program the MCLK value to MCLK_FREQ bit field */
679 if (host->clock <= 112000000)
680 mclk_freq = 0;
681 else if (host->clock <= 125000000)
682 mclk_freq = 1;
683 else if (host->clock <= 137000000)
684 mclk_freq = 2;
685 else if (host->clock <= 150000000)
686 mclk_freq = 3;
687 else if (host->clock <= 162000000)
688 mclk_freq = 4;
689 else if (host->clock <= 175000000)
690 mclk_freq = 5;
691 else if (host->clock <= 187000000)
692 mclk_freq = 6;
693 else if (host->clock <= 200000000)
694 mclk_freq = 7;
695
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530696 writel_relaxed(((readl_relaxed(host->ioaddr +
697 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700698 & ~(7 << 24)) | (mclk_freq << 24)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530699 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700700}
701
702/* Initialize the DLL (Programmable Delay Line ) */
703static int msm_init_cm_dll(struct sdhci_host *host)
704{
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800705 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
706 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530707 const struct sdhci_msm_offset *msm_host_offset =
708 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700709 struct mmc_host *mmc = host->mmc;
710 int rc = 0;
711 unsigned long flags;
712 u32 wait_cnt;
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530713 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700714
715 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
716 spin_lock_irqsave(&host->lock, flags);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530717 prev_pwrsave = !!(readl_relaxed(host->ioaddr +
718 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530719 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700720 /*
721 * Make sure that clock is always enabled when DLL
722 * tuning is in progress. Keeping PWRSAVE ON may
723 * turn off the clock. So let's disable the PWRSAVE
724 * here and re-enable it once tuning is completed.
725 */
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530726 if (prev_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530727 writel_relaxed((readl_relaxed(host->ioaddr +
728 msm_host_offset->CORE_VENDOR_SPEC)
729 & ~CORE_CLK_PWRSAVE), host->ioaddr +
730 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530731 curr_pwrsave = false;
732 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700733
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800734 if (msm_host->use_updated_dll_reset) {
735 /* Disable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530736 writel_relaxed((readl_relaxed(host->ioaddr +
737 msm_host_offset->CORE_DLL_CONFIG)
738 & ~CORE_CK_OUT_EN), host->ioaddr +
739 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800740
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530741 writel_relaxed((readl_relaxed(host->ioaddr +
742 msm_host_offset->CORE_DLL_CONFIG_2)
743 | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
744 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800745 }
746
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700747 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530748 writel_relaxed((readl_relaxed(host->ioaddr +
749 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
750 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700751
752 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530753 writel_relaxed((readl_relaxed(host->ioaddr +
754 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
755 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700756 msm_cm_dll_set_freq(host);
757
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800758 if (msm_host->use_updated_dll_reset) {
759 u32 mclk_freq = 0;
760
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530761 if ((readl_relaxed(host->ioaddr +
762 msm_host_offset->CORE_DLL_CONFIG_2)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800763 & CORE_FLL_CYCLE_CNT))
764 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
765 else
766 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
767
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530768 writel_relaxed(((readl_relaxed(host->ioaddr +
769 msm_host_offset->CORE_DLL_CONFIG_2)
770 & ~(0xFF << 10)) | (mclk_freq << 10)),
771 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800772 /* wait for 5us before enabling DLL clock */
773 udelay(5);
774 }
775
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700776 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530777 writel_relaxed((readl_relaxed(host->ioaddr +
778 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
779 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700780
781 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530782 writel_relaxed((readl_relaxed(host->ioaddr +
783 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
784 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700785
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800786 if (msm_host->use_updated_dll_reset) {
787 msm_cm_dll_set_freq(host);
788 /* Enable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530789 writel_relaxed((readl_relaxed(host->ioaddr +
790 msm_host_offset->CORE_DLL_CONFIG_2)
791 & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
792 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800793 }
794
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700795 /* Set DLL_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530796 writel_relaxed((readl_relaxed(host->ioaddr +
797 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
798 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700799
800 /* Set CK_OUT_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530801 writel_relaxed((readl_relaxed(host->ioaddr +
802 msm_host_offset->CORE_DLL_CONFIG)
803 | CORE_CK_OUT_EN), host->ioaddr +
804 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700805
806 wait_cnt = 50;
807 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530808 while (!(readl_relaxed(host->ioaddr +
809 msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700810 /* max. wait for 50us sec for LOCK bit to be set */
811 if (--wait_cnt == 0) {
812 pr_err("%s: %s: DLL failed to LOCK\n",
813 mmc_hostname(mmc), __func__);
814 rc = -ETIMEDOUT;
815 goto out;
816 }
817 /* wait for 1us before polling again */
818 udelay(1);
819 }
820
821out:
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530822 /* Restore the correct PWRSAVE state */
823 if (prev_pwrsave ^ curr_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530824 u32 reg = readl_relaxed(host->ioaddr +
825 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530826
827 if (prev_pwrsave)
828 reg |= CORE_CLK_PWRSAVE;
829 else
830 reg &= ~CORE_CLK_PWRSAVE;
831
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530832 writel_relaxed(reg, host->ioaddr +
833 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530834 }
835
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700836 spin_unlock_irqrestore(&host->lock, flags);
837 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
838 return rc;
839}
840
/*
 * sdhci_msm_cdclp533_calibration - calibrate the CDCLP533 CDC hardware
 * @host: SDHCI host whose Calibrated Delay Circuit (CDC) block to program
 *
 * Runs the vendor-documented CDC register initialization sequence, triggers
 * a HW auto-calibration and polls CORE_CSR_CDC_STATUS0 for completion.
 * Called from the HS400 DLL calibration path for controllers that use the
 * CDCLP533 DLL (msm_host->use_cdclp533).
 *
 * Returns 0 on success, -ETIMEDOUT if CALIBRATION_DONE is never set, or
 * -EINVAL if the CDC reports a non-zero error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Make sure all the relaxed writes above are posted before polling */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
953
/*
 * sdhci_msm_cm_dll_sdc4_calibration - calibrate the CM_DLL_SDC4 DLL hardware
 * @host: SDHCI host to calibrate
 *
 * Re-programs the DDR config register(s) (the bootloader may have changed
 * them), optionally enables CMDIN_RCLK for HS400 enhanced strobe, kicks off
 * DDR calibration and polls for DDR DLL lock.  Used on controllers without
 * the CDCLP533 block.
 *
 * Returns 0 on success or -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		/* Controllers with the RCLK delay fix take the POR value
		 * in CORE_DDR_CONFIG_2 as-is. */
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		/* Otherwise patch only the PRG_RCLK_DLY field of the POR
		 * value before writing CORE_DDR_CONFIG. */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	/* Set CMDIN_RCLK_EN when both host and card support enhanced strobe */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN), host->ioaddr +
				msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		 msm_host_offset->CORE_DLL_STATUS,
		 dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC3);
	/* Ensure the relaxed register writes above are posted */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1022
Ritesh Harjaniea709662015-05-27 15:40:24 +05301023static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
1024{
1025 int ret = 0;
1026 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1027 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1028 struct mmc_host *mmc = host->mmc;
1029
1030 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
1031
Ritesh Harjani70e2a712015-08-25 11:34:16 +05301032 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
1033 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +05301034 mmc_hostname(mmc));
1035 return -EINVAL;
1036 }
1037
1038 if (msm_host->calibration_done ||
1039 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
1040 return 0;
1041 }
1042
1043 /*
1044 * Reset the tuning block.
1045 */
1046 ret = msm_init_cm_dll(host);
1047 if (ret)
1048 goto out;
1049
1050 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
1051out:
1052 if (!ret)
1053 msm_host->calibration_done = true;
1054 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
1055 __func__, ret);
1056 return ret;
1057}
1058
/*
 * sdhci_msm_hs400_dll_calibration - re-calibrate the DLL for HS400 mode
 * @host: SDHCI host to calibrate
 *
 * Retuning in HS400 (DDR mode) will fail, so instead: reset the tuning
 * block, restore the phase saved from the last successful tuning run, and
 * run the HW calibration appropriate for this controller (CDCLP533 or
 * CM_DLL_SDC4).
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1099
Krishna Konda96e6b112013-10-28 15:25:03 -07001100static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
1101 u8 drv_type)
1102{
1103 struct mmc_command cmd = {0};
1104 struct mmc_request mrq = {NULL};
1105 struct mmc_host *mmc = host->mmc;
1106 u8 val = ((drv_type << 4) | 2);
1107
1108 cmd.opcode = MMC_SWITCH;
1109 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1110 (EXT_CSD_HS_TIMING << 16) |
1111 (val << 8) |
1112 EXT_CSD_CMD_SET_NORMAL;
1113 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
1114 /* 1 sec */
1115 cmd.busy_timeout = 1000 * 1000;
1116
1117 memset(cmd.resp, 0, sizeof(cmd.resp));
1118 cmd.retries = 3;
1119
1120 mrq.cmd = &cmd;
1121 cmd.data = NULL;
1122
1123 mmc_wait_for_req(mmc, &mrq);
1124 pr_debug("%s: %s: set card drive type to %d\n",
1125 mmc_hostname(mmc), __func__,
1126 drv_type);
1127}
1128
/*
 * sdhci_msm_execute_tuning - find and program the best CM DLL tuning phase
 * @host: SDHCI host being tuned
 * @opcode: tuning command opcode (e.g. MMC_SEND_TUNING_BLOCK_HS200)
 *
 * Sweeps all 16 DLL phases, issuing the tuning command at each phase and
 * comparing the received data with the expected tuning block pattern.  The
 * most appropriate passing phase is then programmed into the delay line and
 * remembered in msm_host->saved_tuning_phase.  If *every* phase passes on an
 * eMMC card, the card's drive strength is changed and the sweep repeated
 * (see comment below).  The whole sequence is retried up to three times
 * before failing with -EIO.
 *
 * Also short-circuits into the HS400 DLL calibration path when tuning has
 * already been done and HS400 calibration is still pending.
 *
 * Returns 0 on success or when tuning is not required, negative error code
 * on failure.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;		/* whole-sweep retry budget */
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the wider 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a failed attempt, poll CMD13 until the card is back in
		 * TRAN state before trying the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				   (R1_CURRENT_STATE(sts_cmd.resp[0])
				   != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1343
Asutosh Das0ef24812012-12-18 16:14:02 +05301344static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1345{
1346 struct sdhci_msm_gpio_data *curr;
1347 int i, ret = 0;
1348
1349 curr = pdata->pin_data->gpio_data;
1350 for (i = 0; i < curr->size; i++) {
1351 if (!gpio_is_valid(curr->gpio[i].no)) {
1352 ret = -EINVAL;
1353 pr_err("%s: Invalid gpio = %d\n", __func__,
1354 curr->gpio[i].no);
1355 goto free_gpios;
1356 }
1357 if (enable) {
1358 ret = gpio_request(curr->gpio[i].no,
1359 curr->gpio[i].name);
1360 if (ret) {
1361 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1362 __func__, curr->gpio[i].no,
1363 curr->gpio[i].name, ret);
1364 goto free_gpios;
1365 }
1366 curr->gpio[i].is_enabled = true;
1367 } else {
1368 gpio_free(curr->gpio[i].no);
1369 curr->gpio[i].is_enabled = false;
1370 }
1371 }
1372 return ret;
1373
1374free_gpios:
1375 for (i--; i >= 0; i--) {
1376 gpio_free(curr->gpio[i].no);
1377 curr->gpio[i].is_enabled = false;
1378 }
1379 return ret;
1380}
1381
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301382static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1383 bool enable)
1384{
1385 int ret = 0;
1386
1387 if (enable)
1388 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1389 pdata->pctrl_data->pins_active);
1390 else
1391 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1392 pdata->pctrl_data->pins_sleep);
1393
1394 if (ret < 0)
1395 pr_err("%s state for pinctrl failed with %d\n",
1396 enable ? "Enabling" : "Disabling", ret);
1397
1398 return ret;
1399}
1400
Asutosh Das0ef24812012-12-18 16:14:02 +05301401static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1402{
1403 int ret = 0;
1404
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301405 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301406 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301407 } else if (pdata->pctrl_data) {
1408 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1409 goto out;
1410 } else if (!pdata->pin_data) {
1411 return 0;
1412 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301413
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301414 if (pdata->pin_data->is_gpio)
1415 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301416out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301417 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301418 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301419
1420 return ret;
1421}
1422
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301423static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1424 u32 **out, int *len, u32 size)
1425{
1426 int ret = 0;
1427 struct device_node *np = dev->of_node;
1428 size_t sz;
1429 u32 *arr = NULL;
1430
1431 if (!of_get_property(np, prop_name, len)) {
1432 ret = -EINVAL;
1433 goto out;
1434 }
1435 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001436 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301437 dev_err(dev, "%s invalid size\n", prop_name);
1438 ret = -EINVAL;
1439 goto out;
1440 }
1441
1442 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1443 if (!arr) {
1444 dev_err(dev, "%s failed allocating memory\n", prop_name);
1445 ret = -ENOMEM;
1446 goto out;
1447 }
1448
1449 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1450 if (ret < 0) {
1451 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1452 goto out;
1453 }
1454 *out = arr;
1455out:
1456 if (ret)
1457 *len = 0;
1458 return ret;
1459}
1460
Asutosh Das0ef24812012-12-18 16:14:02 +05301461#define MAX_PROP_SIZE 32
1462static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1463 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1464{
1465 int len, ret = 0;
1466 const __be32 *prop;
1467 char prop_name[MAX_PROP_SIZE];
1468 struct sdhci_msm_reg_data *vreg;
1469 struct device_node *np = dev->of_node;
1470
1471 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1472 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301473 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301474 return ret;
1475 }
1476
1477 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1478 if (!vreg) {
1479 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1480 ret = -ENOMEM;
1481 return ret;
1482 }
1483
1484 vreg->name = vreg_name;
1485
1486 snprintf(prop_name, MAX_PROP_SIZE,
1487 "qcom,%s-always-on", vreg_name);
1488 if (of_get_property(np, prop_name, NULL))
1489 vreg->is_always_on = true;
1490
1491 snprintf(prop_name, MAX_PROP_SIZE,
1492 "qcom,%s-lpm-sup", vreg_name);
1493 if (of_get_property(np, prop_name, NULL))
1494 vreg->lpm_sup = true;
1495
1496 snprintf(prop_name, MAX_PROP_SIZE,
1497 "qcom,%s-voltage-level", vreg_name);
1498 prop = of_get_property(np, prop_name, &len);
1499 if (!prop || (len != (2 * sizeof(__be32)))) {
1500 dev_warn(dev, "%s %s property\n",
1501 prop ? "invalid format" : "no", prop_name);
1502 } else {
1503 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1504 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1505 }
1506
1507 snprintf(prop_name, MAX_PROP_SIZE,
1508 "qcom,%s-current-level", vreg_name);
1509 prop = of_get_property(np, prop_name, &len);
1510 if (!prop || (len != (2 * sizeof(__be32)))) {
1511 dev_warn(dev, "%s %s property\n",
1512 prop ? "invalid format" : "no", prop_name);
1513 } else {
1514 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1515 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1516 }
1517
1518 *vreg_data = vreg;
1519 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1520 vreg->name, vreg->is_always_on ? "always_on," : "",
1521 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1522 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1523
1524 return ret;
1525}
1526
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301527static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1528 struct sdhci_msm_pltfm_data *pdata)
1529{
1530 struct sdhci_pinctrl_data *pctrl_data;
1531 struct pinctrl *pctrl;
1532 int ret = 0;
1533
1534 /* Try to obtain pinctrl handle */
1535 pctrl = devm_pinctrl_get(dev);
1536 if (IS_ERR(pctrl)) {
1537 ret = PTR_ERR(pctrl);
1538 goto out;
1539 }
1540 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1541 if (!pctrl_data) {
1542 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1543 ret = -ENOMEM;
1544 goto out;
1545 }
1546 pctrl_data->pctrl = pctrl;
1547 /* Look-up and keep the states handy to be used later */
1548 pctrl_data->pins_active = pinctrl_lookup_state(
1549 pctrl_data->pctrl, "active");
1550 if (IS_ERR(pctrl_data->pins_active)) {
1551 ret = PTR_ERR(pctrl_data->pins_active);
1552 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1553 goto out;
1554 }
1555 pctrl_data->pins_sleep = pinctrl_lookup_state(
1556 pctrl_data->pctrl, "sleep");
1557 if (IS_ERR(pctrl_data->pins_sleep)) {
1558 ret = PTR_ERR(pctrl_data->pins_sleep);
1559 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1560 goto out;
1561 }
1562 pdata->pctrl_data = pctrl_data;
1563out:
1564 return ret;
1565}
1566
Asutosh Das0ef24812012-12-18 16:14:02 +05301567#define GPIO_NAME_MAX_LEN 32
1568static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1569 struct sdhci_msm_pltfm_data *pdata)
1570{
1571 int ret = 0, cnt, i;
1572 struct sdhci_msm_pin_data *pin_data;
1573 struct device_node *np = dev->of_node;
1574
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301575 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1576 if (!ret) {
1577 goto out;
1578 } else if (ret == -EPROBE_DEFER) {
1579 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1580 goto out;
1581 } else {
1582 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1583 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301584 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301585 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301586 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1587 if (!pin_data) {
1588 dev_err(dev, "No memory for pin_data\n");
1589 ret = -ENOMEM;
1590 goto out;
1591 }
1592
1593 cnt = of_gpio_count(np);
1594 if (cnt > 0) {
1595 pin_data->gpio_data = devm_kzalloc(dev,
1596 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1597 if (!pin_data->gpio_data) {
1598 dev_err(dev, "No memory for gpio_data\n");
1599 ret = -ENOMEM;
1600 goto out;
1601 }
1602 pin_data->gpio_data->size = cnt;
1603 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1604 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1605
1606 if (!pin_data->gpio_data->gpio) {
1607 dev_err(dev, "No memory for gpio\n");
1608 ret = -ENOMEM;
1609 goto out;
1610 }
1611
1612 for (i = 0; i < cnt; i++) {
1613 const char *name = NULL;
1614 char result[GPIO_NAME_MAX_LEN];
1615 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1616 of_property_read_string_index(np,
1617 "qcom,gpio-names", i, &name);
1618
1619 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1620 dev_name(dev), name ? name : "?");
1621 pin_data->gpio_data->gpio[i].name = result;
1622 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1623 pin_data->gpio_data->gpio[i].name,
1624 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301625 }
1626 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301627 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301628out:
1629 if (ret)
1630 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1631 return ret;
1632}
1633
#ifdef CONFIG_SMP
/*
 * On SMP builds, request that the PM QoS vote follows the CPU the SDHC
 * IRQ is affined to (PM_QOS_REQ_AFFINE_IRQ).
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
/* On UP builds there is no IRQ affinity; leave the request type unchanged. */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1642
Gilad Bronerc788a672015-09-08 15:39:11 +03001643static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1644 struct sdhci_msm_pltfm_data *pdata)
1645{
1646 struct device_node *np = dev->of_node;
1647 const char *str;
1648 u32 cpu;
1649 int ret = 0;
1650 int i;
1651
1652 pdata->pm_qos_data.irq_valid = false;
1653 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1654 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1655 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001656 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001657 }
1658
1659 /* must specify cpu for "affine_cores" type */
1660 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1661 pdata->pm_qos_data.irq_cpu = -1;
1662 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1663 if (ret) {
1664 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1665 ret);
1666 goto out;
1667 }
1668 if (cpu < 0 || cpu >= num_possible_cpus()) {
1669 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1670 __func__, cpu, num_possible_cpus());
1671 ret = -EINVAL;
1672 goto out;
1673 }
1674 pdata->pm_qos_data.irq_cpu = cpu;
1675 }
1676
1677 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1678 SDHCI_POWER_POLICY_NUM) {
1679 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1680 __func__, SDHCI_POWER_POLICY_NUM);
1681 ret = -EINVAL;
1682 goto out;
1683 }
1684
1685 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1686 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1687 &pdata->pm_qos_data.irq_latency.latency[i]);
1688
1689 pdata->pm_qos_data.irq_valid = true;
1690out:
1691 return ret;
1692}
1693
1694static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1695 struct sdhci_msm_pltfm_data *pdata)
1696{
1697 struct device_node *np = dev->of_node;
1698 u32 mask;
1699 int nr_groups;
1700 int ret;
1701 int i;
1702
1703 /* Read cpu group mapping */
1704 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1705 if (nr_groups <= 0) {
1706 ret = -EINVAL;
1707 goto out;
1708 }
1709 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1710 pdata->pm_qos_data.cpu_group_map.mask =
1711 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1712 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1713 ret = -ENOMEM;
1714 goto out;
1715 }
1716
1717 for (i = 0; i < nr_groups; i++) {
1718 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1719 i, &mask);
1720
1721 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1722 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1723 cpu_possible_mask)) {
1724 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1725 __func__, mask, i);
1726 ret = -EINVAL;
1727 goto free_res;
1728 }
1729 }
1730 return 0;
1731
1732free_res:
1733 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1734out:
1735 return ret;
1736}
1737
1738static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1739 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1740{
1741 struct device_node *np = dev->of_node;
1742 struct sdhci_msm_pm_qos_latency *values;
1743 int ret;
1744 int i;
1745 int group;
1746 int cfg;
1747
1748 ret = of_property_count_u32_elems(np, name);
1749 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1750 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1751 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1752 ret);
1753 return -EINVAL;
1754 } else if (ret < 0) {
1755 return ret;
1756 }
1757
1758 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1759 GFP_KERNEL);
1760 if (!values)
1761 return -ENOMEM;
1762
1763 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1764 group = i / SDHCI_POWER_POLICY_NUM;
1765 cfg = i % SDHCI_POWER_POLICY_NUM;
1766 of_property_read_u32_index(np, name, i,
1767 &(values[group].latency[cfg]));
1768 }
1769
1770 *latency = values;
1771 return 0;
1772}
1773
1774static void sdhci_msm_pm_qos_parse(struct device *dev,
1775 struct sdhci_msm_pltfm_data *pdata)
1776{
1777 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1778 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1779 __func__);
1780
1781 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1782 pdata->pm_qos_data.cmdq_valid =
1783 !sdhci_msm_pm_qos_parse_latency(dev,
1784 "qcom,pm-qos-cmdq-latency-us",
1785 pdata->pm_qos_data.cpu_group_map.nr_groups,
1786 &pdata->pm_qos_data.cmdq_latency);
1787 pdata->pm_qos_data.legacy_valid =
1788 !sdhci_msm_pm_qos_parse_latency(dev,
1789 "qcom,pm-qos-legacy-latency-us",
1790 pdata->pm_qos_data.cpu_group_map.nr_groups,
1791 &pdata->pm_qos_data.latency);
1792 if (!pdata->pm_qos_data.cmdq_valid &&
1793 !pdata->pm_qos_data.legacy_valid) {
1794 /* clean-up previously allocated arrays */
1795 kfree(pdata->pm_qos_data.latency);
1796 kfree(pdata->pm_qos_data.cmdq_latency);
1797 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1798 __func__);
1799 }
1800 } else {
1801 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1802 __func__);
1803 }
1804}
1805
Asutosh Das0ef24812012-12-18 16:14:02 +05301806/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001807static
1808struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1809 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301810{
1811 struct sdhci_msm_pltfm_data *pdata = NULL;
1812 struct device_node *np = dev->of_node;
1813 u32 bus_width = 0;
1814 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301815 int clk_table_len;
1816 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301817 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301818
1819 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1820 if (!pdata) {
1821 dev_err(dev, "failed to allocate memory for platform data\n");
1822 goto out;
1823 }
1824
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301825 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1826 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1827 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301828
Asutosh Das0ef24812012-12-18 16:14:02 +05301829 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1830 if (bus_width == 8)
1831 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1832 else if (bus_width == 4)
1833 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1834 else {
1835 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1836 pdata->mmc_bus_width = 0;
1837 }
1838
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001839 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1840 &msm_host->mmc->clk_scaling.freq_table,
1841 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1842 pr_debug("%s: no clock scaling frequencies were supplied\n",
1843 dev_name(dev));
1844 else if (!msm_host->mmc->clk_scaling.freq_table ||
1845 !msm_host->mmc->clk_scaling.freq_table_sz)
1846 dev_err(dev, "bad dts clock scaling frequencies\n");
1847
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301848 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1849 &clk_table, &clk_table_len, 0)) {
1850 dev_err(dev, "failed parsing supported clock rates\n");
1851 goto out;
1852 }
1853 if (!clk_table || !clk_table_len) {
1854 dev_err(dev, "Invalid clock table\n");
1855 goto out;
1856 }
1857 pdata->sup_clk_table = clk_table;
1858 pdata->sup_clk_cnt = clk_table_len;
1859
Asutosh Das0ef24812012-12-18 16:14:02 +05301860 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1861 sdhci_msm_slot_reg_data),
1862 GFP_KERNEL);
1863 if (!pdata->vreg_data) {
1864 dev_err(dev, "failed to allocate memory for vreg data\n");
1865 goto out;
1866 }
1867
1868 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1869 "vdd")) {
1870 dev_err(dev, "failed parsing vdd data\n");
1871 goto out;
1872 }
1873 if (sdhci_msm_dt_parse_vreg_info(dev,
1874 &pdata->vreg_data->vdd_io_data,
1875 "vdd-io")) {
1876 dev_err(dev, "failed parsing vdd-io data\n");
1877 goto out;
1878 }
1879
1880 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1881 dev_err(dev, "failed parsing gpio data\n");
1882 goto out;
1883 }
1884
Asutosh Das0ef24812012-12-18 16:14:02 +05301885 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1886
1887 for (i = 0; i < len; i++) {
1888 const char *name = NULL;
1889
1890 of_property_read_string_index(np,
1891 "qcom,bus-speed-mode", i, &name);
1892 if (!name)
1893 continue;
1894
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001895 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1896 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1897 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1898 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1899 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301900 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1901 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1902 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1903 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1904 pdata->caps |= MMC_CAP_1_8V_DDR
1905 | MMC_CAP_UHS_DDR50;
1906 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1907 pdata->caps |= MMC_CAP_1_2V_DDR
1908 | MMC_CAP_UHS_DDR50;
1909 }
1910
1911 if (of_get_property(np, "qcom,nonremovable", NULL))
1912 pdata->nonremovable = true;
1913
Guoping Yuf7c91332014-08-20 16:56:18 +08001914 if (of_get_property(np, "qcom,nonhotplug", NULL))
1915 pdata->nonhotplug = true;
1916
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001917 pdata->largeaddressbus =
1918 of_property_read_bool(np, "qcom,large-address-bus");
1919
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001920 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1921 msm_host->mmc->wakeup_on_idle = true;
1922
Gilad Bronerc788a672015-09-08 15:39:11 +03001923 sdhci_msm_pm_qos_parse(dev, pdata);
1924
Pavan Anamula5a256df2015-10-16 14:38:28 +05301925 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1926 pdata->core_3_0v_support = true;
1927
Asutosh Das0ef24812012-12-18 16:14:02 +05301928 return pdata;
1929out:
1930 return NULL;
1931}
1932
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301933/* Returns required bandwidth in Bytes per Sec */
1934static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1935 struct mmc_ios *ios)
1936{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301937 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1938 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1939
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301940 unsigned int bw;
1941
Sahitya Tummala2886c922013-04-03 18:03:31 +05301942 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301943 /*
1944 * For DDR mode, SDCC controller clock will be at
1945 * the double rate than the actual clock that goes to card.
1946 */
1947 if (ios->bus_width == MMC_BUS_WIDTH_4)
1948 bw /= 2;
1949 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1950 bw /= 8;
1951
1952 return bw;
1953}
1954
1955static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1956 unsigned int bw)
1957{
1958 unsigned int *table = host->pdata->voting_data->bw_vecs;
1959 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1960 int i;
1961
1962 if (host->msm_bus_vote.is_max_bw_needed && bw)
1963 return host->msm_bus_vote.max_bw_vote;
1964
1965 for (i = 0; i < size; i++) {
1966 if (bw <= table[i])
1967 break;
1968 }
1969
1970 if (i && (i == size))
1971 i--;
1972
1973 return i;
1974}
1975
/*
 * Apply an msm-bus bandwidth vote.
 *
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 *
 * The lock is dropped around msm_bus_scale_client_update_request()
 * (NOTE(review): presumably because the bus-scale call may block --
 * confirm) and re-acquired before returning; the caller's saved irq
 * flags are therefore passed in by reference.
 *
 * Returns 0 on success or the bus-scale error code.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					 int vote,
					 unsigned long *flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	BUG_ON(!flags);

	/* only issue a bus-scale request when the vote actually changes */
	if (vote != msm_host->msm_bus_vote.curr_vote) {
		spin_unlock_irqrestore(&host->lock, *flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, *flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		/* cache the vote so redundant requests are skipped */
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}
2006
/*
 * Internal work. Work to set 0 bandwidth for msm bus.
 *
 * Scheduled (delayed) by sdhci_msm_bus_queue_work(); drops the bus vote
 * to the minimum level once the controller has been idle, unless a
 * request is still in flight.
 */
static void sdhci_msm_bus_work(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host;
	struct sdhci_host *host;
	unsigned long flags;

	msm_host = container_of(work, struct sdhci_msm_host,
				msm_bus_vote.vote_work.work);
	host = platform_get_drvdata(msm_host->pdev);

	/* nothing to do if bus voting was never registered */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	spin_lock_irqsave(&host->lock, flags);
	/* don't vote for 0 bandwidth if any request is in progress */
	if (!host->mrq) {
		/* set_vote() may drop and re-acquire host->lock via flags */
		sdhci_msm_bus_set_vote(msm_host,
			msm_host->msm_bus_vote.min_bw_vote, &flags);
	} else
		pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
			   mmc_hostname(host->mmc), __func__);
	spin_unlock_irqrestore(&host->lock, flags);
}
2033
/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 *
 * cancel_delayed_work_sync() must run before taking host->lock (it can
 * sleep waiting for a queued sdhci_msm_bus_work to finish).
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	/* set_vote() may drop and re-acquire host->lock via flags */
	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
	spin_unlock_irqrestore(&host->lock, flags);
}
2052
2053#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2054
2055/* This function queues a work which will set the bandwidth requiement to 0 */
2056static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2057{
2058 unsigned long flags;
2059 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2060 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2061
2062 spin_lock_irqsave(&host->lock, flags);
2063 if (msm_host->msm_bus_vote.min_bw_vote !=
2064 msm_host->msm_bus_vote.curr_vote)
2065 queue_delayed_work(system_wq,
2066 &msm_host->msm_bus_vote.vote_work,
2067 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2068 spin_unlock_irqrestore(&host->lock, flags);
2069}
2070
/*
 * Register this host as an msm-bus scaling client.
 *
 * Reads the bus-scale platform data and the "qcom,bus-bw-vectors-bps"
 * table from DT; if both are present and non-empty, registers the
 * client and caches the vote indices for minimum (0) and maximum
 * (UINT_MAX) bandwidth. If the DT carries no bus-scale data, the
 * temporary allocation is released and the host simply runs without
 * bus voting (rc stays 0).
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	/* Only register when every piece of voting data is present */
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* no usable voting data: release the unused allocation */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
2125
2126static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2127{
2128 if (host->msm_bus_vote.client_handle)
2129 msm_bus_scale_unregister_client(
2130 host->msm_bus_vote.client_handle);
2131}
2132
/*
 * Enable or disable the bus-bandwidth vote for the current ios
 * configuration. Enabling votes immediately for the bandwidth derived
 * from the current clock rate and bus width; disabling either removes
 * the vote at once (when MMC clock gating provides its own delay) or
 * defers it through the delayed vote work.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	/* bus voting is optional; do nothing when it was never set up */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
2161
Asutosh Das0ef24812012-12-18 16:14:02 +05302162/* Regulator utility functions */
2163static int sdhci_msm_vreg_init_reg(struct device *dev,
2164 struct sdhci_msm_reg_data *vreg)
2165{
2166 int ret = 0;
2167
2168 /* check if regulator is already initialized? */
2169 if (vreg->reg)
2170 goto out;
2171
2172 /* Get the regulator handle */
2173 vreg->reg = devm_regulator_get(dev, vreg->name);
2174 if (IS_ERR(vreg->reg)) {
2175 ret = PTR_ERR(vreg->reg);
2176 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2177 __func__, vreg->name, ret);
2178 goto out;
2179 }
2180
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302181 if (regulator_count_voltages(vreg->reg) > 0) {
2182 vreg->set_voltage_sup = true;
2183 /* sanity check */
2184 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2185 pr_err("%s: %s invalid constraints specified\n",
2186 __func__, vreg->name);
2187 ret = -EINVAL;
2188 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302189 }
2190
2191out:
2192 return ret;
2193}
2194
2195static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2196{
2197 if (vreg->reg)
2198 devm_regulator_put(vreg->reg);
2199}
2200
2201static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2202 *vreg, int uA_load)
2203{
2204 int ret = 0;
2205
2206 /*
2207 * regulators that do not support regulator_set_voltage also
2208 * do not support regulator_set_optimum_mode
2209 */
2210 if (vreg->set_voltage_sup) {
2211 ret = regulator_set_load(vreg->reg, uA_load);
2212 if (ret < 0)
2213 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2214 __func__, vreg->name, uA_load, ret);
2215 else
2216 /*
2217 * regulator_set_load() can return non zero
2218 * value even for success case.
2219 */
2220 ret = 0;
2221 }
2222 return ret;
2223}
2224
2225static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2226 int min_uV, int max_uV)
2227{
2228 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302229 if (vreg->set_voltage_sup) {
2230 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2231 if (ret) {
2232 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302233 __func__, vreg->name, min_uV, max_uV, ret);
2234 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302235 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302236
2237 return ret;
2238}
2239
2240static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2241{
2242 int ret = 0;
2243
2244 /* Put regulator in HPM (high power mode) */
2245 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2246 if (ret < 0)
2247 return ret;
2248
2249 if (!vreg->is_enabled) {
2250 /* Set voltage level */
2251 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2252 vreg->high_vol_level);
2253 if (ret)
2254 return ret;
2255 }
2256 ret = regulator_enable(vreg->reg);
2257 if (ret) {
2258 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2259 __func__, vreg->name, ret);
2260 return ret;
2261 }
2262 vreg->is_enabled = true;
2263 return ret;
2264}
2265
2266static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2267{
2268 int ret = 0;
2269
2270 /* Never disable regulator marked as always_on */
2271 if (vreg->is_enabled && !vreg->is_always_on) {
2272 ret = regulator_disable(vreg->reg);
2273 if (ret) {
2274 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2275 __func__, vreg->name, ret);
2276 goto out;
2277 }
2278 vreg->is_enabled = false;
2279
2280 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2281 if (ret < 0)
2282 goto out;
2283
2284 /* Set min. voltage level to 0 */
2285 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2286 if (ret)
2287 goto out;
2288 } else if (vreg->is_enabled && vreg->is_always_on) {
2289 if (vreg->lpm_sup) {
2290 /* Put always_on regulator in LPM (low power mode) */
2291 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2292 vreg->lpm_uA);
2293 if (ret < 0)
2294 goto out;
2295 }
2296 }
2297out:
2298 return ret;
2299}
2300
2301static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2302 bool enable, bool is_init)
2303{
2304 int ret = 0, i;
2305 struct sdhci_msm_slot_reg_data *curr_slot;
2306 struct sdhci_msm_reg_data *vreg_table[2];
2307
2308 curr_slot = pdata->vreg_data;
2309 if (!curr_slot) {
2310 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2311 __func__);
2312 goto out;
2313 }
2314
2315 vreg_table[0] = curr_slot->vdd_data;
2316 vreg_table[1] = curr_slot->vdd_io_data;
2317
2318 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2319 if (vreg_table[i]) {
2320 if (enable)
2321 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2322 else
2323 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2324 if (ret)
2325 goto out;
2326 }
2327 }
2328out:
2329 return ret;
2330}
2331
2332/*
2333 * Reset vreg by ensuring it is off during probe. A call
2334 * to enable vreg is needed to balance disable vreg
2335 */
2336static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2337{
2338 int ret;
2339
2340 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2341 if (ret)
2342 return ret;
2343 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2344 return ret;
2345}
2346
/*
 * This init function should be called only once for each SDHC slot.
 *
 * With is_init=true: acquire both regulator handles (vdd then vdd-io)
 * and run a reset cycle so the rails start in a known-off state. With
 * is_init=false: deregister both regulators via the goto chain below.
 * Returns 0 or the first error; on partial init failure the already
 * acquired handles are released.
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	curr_slot = pdata->vreg_data;
	/* no regulator data: slot is powered externally, nothing to init */
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			/* vdd was acquired already -- release it on the way out */
			goto vdd_reg_deinit;
	}
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2395
2396
2397static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2398 enum vdd_io_level level,
2399 unsigned int voltage_level)
2400{
2401 int ret = 0;
2402 int set_level;
2403 struct sdhci_msm_reg_data *vdd_io_reg;
2404
2405 if (!pdata->vreg_data)
2406 return ret;
2407
2408 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2409 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2410 switch (level) {
2411 case VDD_IO_LOW:
2412 set_level = vdd_io_reg->low_vol_level;
2413 break;
2414 case VDD_IO_HIGH:
2415 set_level = vdd_io_reg->high_vol_level;
2416 break;
2417 case VDD_IO_SET_LEVEL:
2418 set_level = voltage_level;
2419 break;
2420 default:
2421 pr_err("%s: invalid argument level = %d",
2422 __func__, level);
2423 ret = -EINVAL;
2424 return ret;
2425 }
2426 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2427 set_level);
2428 }
2429 return ret;
2430}
2431
Ritesh Harjani42876f42015-11-17 17:46:51 +05302432/*
2433 * Acquire spin-lock host->lock before calling this function
2434 */
2435static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2436 bool enable)
2437{
2438 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2439 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2440
2441 if (enable && !msm_host->is_sdiowakeup_enabled)
2442 enable_irq(msm_host->pdata->sdiowakeup_irq);
2443 else if (!enable && msm_host->is_sdiowakeup_enabled)
2444 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2445 else
2446 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2447 __func__, enable, msm_host->is_sdiowakeup_enabled);
2448 msm_host->is_sdiowakeup_enabled = enable;
2449}
2450
/*
 * Handler for the SDIO wakeup GPIO interrupt: disarm the wakeup irq
 * (under host->lock, as required by the cfg helper) and flag that SDIO
 * processing is pending.
 */
static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned long flags;

	pr_debug("%s: irq (%d) received\n", __func__, irq);

	spin_lock_irqsave(&host->lock, flags);
	/* one-shot: disarm until it is re-armed on the next suspend path */
	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->sdio_pending_processing = true;

	return IRQ_HANDLED;
}
2468
/*
 * Debug dump of the core power-control registers (STATUS/MASK/CTL);
 * used when the power irq status cannot be cleared.
 */
void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
		mmc_hostname(host->mmc),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_PWRCTL_STATUS),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_PWRCTL_MASK),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_PWRCTL_CTL));
}
2485
/*
 * Power-control IRQ handler.
 *
 * Services the vendor-specific PWRCTL interrupt: reads the pending request
 * bits from CORE_PWRCTL_STATUS (bus on/off, IO voltage high/low), clears
 * them, performs the matching regulator/pin/IO-pad updates, and then writes
 * the success/fail acknowledgement to CORE_PWRCTL_CTL so the controller can
 * finish its power sequence. Finally records the new power/IO state and
 * completes pwr_irq_completion for waiters in
 * sdhci_msm_check_power_status().
 */
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 irq_status = 0;
	u8 irq_ack = 0;		/* ack bits accumulated across all requests */
	int ret = 0;
	int pwr_state = 0, io_level = 0;
	unsigned long flags;
	int retry = 10;

	irq_status = sdhci_msm_readb_relaxed(host,
		msm_host_offset->CORE_PWRCTL_STATUS);

	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, irq_status);

	/* Clear the interrupt */
	sdhci_msm_writeb_relaxed(irq_status, host,
		msm_host_offset->CORE_PWRCTL_CLEAR);

	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();
	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status & sdhci_msm_readb_relaxed(host,
		msm_host_offset->CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			/* Unclearable status is treated as fatal. */
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
				mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			BUG_ON(1);
		}
		sdhci_msm_writeb_relaxed(irq_status, host,
			msm_host_offset->CORE_PWRCTL_CLEAR);
		retry--;
		udelay(10);
	}
	if (likely(retry < 10))
		pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
				mmc_hostname(host->mmc), irq_status, retry);

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_HIGH, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
		if (!ret) {
			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
					VDD_IO_LOW, 0);
		}
		if (ret)
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
		else
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;

		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		/* Switch voltage Low */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_LOW;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		/* Switch voltage High */
		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
		if (ret)
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		else
			irq_ack |= CORE_PWRCTL_IO_SUCCESS;

		io_level = REQ_IO_HIGH;
	}

	/* ACK status to the core */
	sdhci_msm_writeb_relaxed(irq_ack, host,
			msm_host_offset->CORE_PWRCTL_CTL);
	/*
	 * SDHC has core_mem and hc_mem device memory and these memory
	 * addresses do not fall within 1KB region. Hence, any update to
	 * core_mem address space would require an mb() to ensure this gets
	 * completed before its next update to registers within hc_mem.
	 */
	mb();

	/*
	 * Program the IO pad power switch for the requested signalling
	 * level: the switch bit is cleared for 3.0V-capable pads and set
	 * for 1.8V (low) operation.
	 */
	if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC) &
				~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	else if ((io_level & REQ_IO_LOW) ||
			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC) |
				CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	mb();

	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
	spin_lock_irqsave(&host->lock, flags);
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;
	/* Wake any thread blocked in sdhci_msm_check_power_status(). */
	complete(&msm_host->pwr_irq_completion);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
2630
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302631static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302632show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2633{
2634 struct sdhci_host *host = dev_get_drvdata(dev);
2635 int poll;
2636 unsigned long flags;
2637
2638 spin_lock_irqsave(&host->lock, flags);
2639 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2640 spin_unlock_irqrestore(&host->lock, flags);
2641
2642 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2643}
2644
2645static ssize_t
2646store_polling(struct device *dev, struct device_attribute *attr,
2647 const char *buf, size_t count)
2648{
2649 struct sdhci_host *host = dev_get_drvdata(dev);
2650 int value;
2651 unsigned long flags;
2652
2653 if (!kstrtou32(buf, 0, &value)) {
2654 spin_lock_irqsave(&host->lock, flags);
2655 if (value) {
2656 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2657 mmc_detect_change(host->mmc, 0);
2658 } else {
2659 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2660 }
2661 spin_unlock_irqrestore(&host->lock, flags);
2662 }
2663 return count;
2664}
2665
2666static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302667show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2668 char *buf)
2669{
2670 struct sdhci_host *host = dev_get_drvdata(dev);
2671 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2672 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2673
2674 return snprintf(buf, PAGE_SIZE, "%u\n",
2675 msm_host->msm_bus_vote.is_max_bw_needed);
2676}
2677
2678static ssize_t
2679store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2680 const char *buf, size_t count)
2681{
2682 struct sdhci_host *host = dev_get_drvdata(dev);
2683 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2684 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2685 uint32_t value;
2686 unsigned long flags;
2687
2688 if (!kstrtou32(buf, 0, &value)) {
2689 spin_lock_irqsave(&host->lock, flags);
2690 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2691 spin_unlock_irqrestore(&host->lock, flags);
2692 }
2693 return count;
2694}
2695
/*
 * Block until the power IRQ has serviced the given request type
 * (REQ_BUS_ON/OFF, REQ_IO_HIGH/LOW), or bail out early when the hardware
 * will never raise the IRQ for this request. Warns (without failing) if
 * the IRQ does not arrive within MSM_PWR_IRQ_TIMEOUT_MS.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	unsigned long flags;
	bool done = false;
	u32 io_sig_sts;

	spin_lock_irqsave(&host->lock, flags);
	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);
	io_sig_sts = sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_GENERICS);

	/*
	 * The IRQ for request type IO High/Low will be generated when -
	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
	 * 2. If 1 is true and when there is a state change in 1.8V enable
	 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
	 * layer tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
				((req_type & REQ_IO_HIGH) && !host->pwr)) {
			pr_debug("%s: do not wait for power IRQ that never comes\n",
					mmc_hostname(host->mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
	}

	/* Request already satisfied by a previously handled IRQ? */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * This is needed here to handle a case where IRQ gets
	 * triggered even before this function is called so that
	 * x->done counter of completion gets reset. Otherwise,
	 * next call to wait_for_completion returns immediately
	 * without actually waiting for the IRQ to be handled.
	 */
	if (done)
		init_completion(&msm_host->pwr_irq_completion);
	else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
		__WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
					mmc_hostname(host->mmc), req_type);

	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
2758
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002759static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2760{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302761 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2762 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2763 const struct sdhci_msm_offset *msm_host_offset =
2764 msm_host->offset;
2765 u32 config = readl_relaxed(host->ioaddr +
2766 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302767
2768 if (enable) {
2769 config |= CORE_CDR_EN;
2770 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302771 writel_relaxed(config, host->ioaddr +
2772 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302773 } else {
2774 config &= ~CORE_CDR_EN;
2775 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302776 writel_relaxed(config, host->ioaddr +
2777 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302778 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002779}
2780
/*
 * Maximum number of scatter-gather segments this driver supports per
 * request (constant; independent of the host instance).
 */
static unsigned int sdhci_msm_max_segs(void)
{
	return SDHCI_MSM_MAX_SEGMENTS;
}
2785
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302786static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302787{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302788 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2789 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302790
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302791 return msm_host->pdata->sup_clk_table[0];
2792}
2793
2794static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2795{
2796 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2797 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2798 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2799
2800 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2801}
2802
2803static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2804 u32 req_clk)
2805{
2806 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2807 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2808 unsigned int sel_clk = -1;
2809 unsigned char cnt;
2810
2811 if (req_clk < sdhci_msm_get_min_clock(host)) {
2812 sel_clk = sdhci_msm_get_min_clock(host);
2813 return sel_clk;
2814 }
2815
2816 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2817 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2818 break;
2819 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2820 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2821 break;
2822 } else {
2823 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2824 }
2825 }
2826 return sel_clk;
2827}
2828
/*
 * Turn on the controller interface clocks: cast a bus vote, then enable
 * the peripheral clock (pclk, if present) and the core clock, in that
 * order. Idempotent — returns 0 immediately if already enabled. On
 * failure the partially acquired resources are released via the goto
 * ladder and the error from clk_prepare_enable() is returned.
 *
 * NOTE(review): ice_clk is released in sdhci_msm_disable_controller_clock()
 * but not enabled here — presumably enabled elsewhere; confirm the pairing.
 */
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	/* Already on — nothing to do. */
	if (atomic_read(&msm_host->controller_clock))
		return 0;

	sdhci_msm_bus_voting(host, 1);

	if (!IS_ERR(msm_host->pclk)) {
		rc = clk_prepare_enable(msm_host->pclk);
		if (rc) {
			pr_err("%s: %s: failed to enable the pclk with error %d\n",
			       mmc_hostname(host->mmc), __func__, rc);
			goto remove_vote;
		}
	}

	rc = clk_prepare_enable(msm_host->clk);
	if (rc) {
		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
		       mmc_hostname(host->mmc), __func__, rc);
		goto disable_pclk;
	}

	atomic_set(&msm_host->controller_clock, 1);
	pr_debug("%s: %s: enabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	goto out;

disable_pclk:
	if (!IS_ERR(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2870
/*
 * Turn off the controller interface clocks (core, pclk, ice) and drop
 * the bus vote. No-op when the controller clock is already off.
 *
 * NOTE(review): ice_clk is disabled here but is not part of
 * sdhci_msm_enable_controller_clock()'s enable sequence — confirm the
 * enable side against the ICE setup path.
 */
static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (atomic_read(&msm_host->controller_clock)) {
		if (!IS_ERR(msm_host->clk))
			clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR(msm_host->ice_clk))
			clk_disable_unprepare(msm_host->ice_clk);
		sdhci_msm_bus_voting(host, 0);
		atomic_set(&msm_host->controller_clock, 0);
		pr_debug("%s: %s: disabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	}
}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302889
/*
 * Enable (enable=true) or disable (enable=false) the full set of host
 * clocks: controller clocks + optional bus/ff/sleep clocks, updating the
 * bus vote and the clks_on bookkeeping. Enabling is done front-to-back
 * with a goto-based unwind ladder on failure; disabling is done in
 * reverse order. Returns 0 on success or a clk_prepare_enable() error.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables are visible before proceeding. */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		/* Tear down in reverse order of the enable path. */
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2984
/*
 * Host-ops set_clock: program the requested card clock.
 *
 * clock == 0 gates PWRSAVE, disables the host clocks and records the
 * rate. Otherwise the host clocks are enabled, PWRSAVE is toggled
 * according to whether the card allows clock gating, the internal clock
 * source (MCLK vs MCLK/2 for HS400) and HC_SELECT_IN are programmed for
 * the current timing mode, and finally the core clock rate is updated
 * (plus a bus re-vote) before delegating to sdhci_set_clock().
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_card *card = host->mmc->card;
	struct mmc_ios	curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	curr_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	/* Enable pwrsave once past init rates, if the card may be gated. */
	if ((clock > 400000) &&
	    !curr_pwrsave && card && mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_HC_MCLK_SEL_MASK)
			| CORE_HC_MCLK_SEL_HS400), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(card && mmc_card_strobe(card) &&
			msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
				msm_host_offset->CORE_VENDOR_SPEC)
				| CORE_HC_SELECT_IN_HS400
				| CORE_HC_SELECT_IN_EN), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr +
					msm_host_offset->CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/*
			 * clear CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3
			 * (the mask is applied with '& ~', i.e. the bit is
			 * cleared, not set)
			 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					msm_host_offset->CORE_VENDOR_SPEC3)
					& ~CORE_PWRSAVE_DLL), host->ioaddr +
					msm_host_offset->CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	}
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
3157
/*
 * Host-ops set_uhs_signaling: program the UHS mode-select field of
 * SDHCI_HOST_CONTROL2 for the requested timing (HS200/HS400 are mapped
 * onto SDR104 since they have no dedicated encoding), and put the DLL
 * into reset/power-down for clock rates <= 100 MHz where the feedback
 * clock is used instead.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
		    (uhs == MMC_TIMING_MMC_HS200) ||
		    (uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG)
				| CORE_DLL_RST), host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG)
				| CORE_DLL_PDN), host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG);
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
3224
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003225#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003226#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303227static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003228{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303229 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303230 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3231 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303232 const struct sdhci_msm_offset *msm_host_offset =
3233 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303234 struct cmdq_host *cq_host = host->cq_host;
3235
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303236 u32 version = sdhci_msm_readl_relaxed(host,
3237 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003238 u16 minor = version & CORE_VERSION_TARGET_MASK;
3239 /* registers offset changed starting from 4.2.0 */
3240 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3241
3242 pr_err("---- Debug RAM dump ----\n");
3243 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3244 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3245 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3246
3247 while (i < 16) {
3248 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3249 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3250 i++;
3251 }
3252 pr_err("-------------------------\n");
3253}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303254
/*
 * Dump MSM vendor-specific controller state for debugging: data/FIFO
 * counters, DLL configuration/status, vendor-specific function registers,
 * the CMDQ debug RAM (when command queueing is enabled), and all internal
 * test-bus outputs. Wired into sdhci_msm_ops.dump_vendor_regs.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	/* Include CQE debug RAM only when a cmdq host is attached */
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(host);

	/* Record counters in the MMC trace buffer as well as the log */
	MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_DATA_CNT),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_FIFO_CNT));
	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_DATA_CNT),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_FIFO_CNT),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_STATUS));
	pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_STATUS),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	/* Select each test bus in turn and sample the debug register */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val =
			(tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
				tbsel | msm_host_offset->CORE_TESTBUS_ENA;
			sdhci_msm_writel_relaxed(test_bus_val, host,
				msm_host_offset->CORE_TESTBUS_CONFIG);
			debug_reg[index++] = sdhci_msm_readl_relaxed(host,
				msm_host_offset->CORE_SDCC_DEBUG_REG);
		}
	}
	/* Print the captured test-bus samples four per line */
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
3327
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303328/*
3329 * sdhci_msm_enhanced_strobe_mask :-
3330 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3331 * SW should write 3 to
3332 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3333 * The default reset value of this register is 2.
3334 */
3335static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3336{
3337 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3338 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303339 const struct sdhci_msm_offset *msm_host_offset =
3340 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303341
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303342 if (!msm_host->enhanced_strobe ||
3343 !mmc_card_strobe(msm_host->mmc->card)) {
3344 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303345 mmc_hostname(host->mmc));
3346 return;
3347 }
3348
3349 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303350 writel_relaxed((readl_relaxed(host->ioaddr +
3351 msm_host_offset->CORE_VENDOR_SPEC3)
3352 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3353 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303354 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303355 writel_relaxed((readl_relaxed(host->ioaddr +
3356 msm_host_offset->CORE_VENDOR_SPEC3)
3357 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3358 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303359 }
3360}
3361
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003362static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3363{
3364 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3365 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303366 const struct sdhci_msm_offset *msm_host_offset =
3367 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003368
3369 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303370 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3371 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003372 } else {
3373 u32 value;
3374
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303375 value = sdhci_msm_readl_relaxed(host,
3376 msm_host_offset->CORE_TESTBUS_CONFIG);
3377 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3378 sdhci_msm_writel_relaxed(value, host,
3379 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003380 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303381}
3382
/*
 * Apply or clear the MSM controller reset workaround.
 *
 * enable != 0: request a software reset via HC_SW_RST_REQ and poll (up to
 * 10000 * 10us) for the hardware to clear it. If the bit never clears,
 * fall back to setting HC_SW_RST_WAIT_IDLE_DIS so the controller resets
 * without waiting for pending AXI transfers, and timestamp the event in
 * host->reset_wa_t.
 *
 * enable == 0: clear the wait-idle-disable bit again.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	vendor_func2 = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
		/* Poll for HW to self-clear the reset request bit */
		timeout = 10000;
		while (readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
				HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	}
}
3429
Gilad Broner44445992015-09-29 16:05:39 +03003430static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3431{
3432 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303433 container_of(work, struct sdhci_msm_pm_qos_irq,
3434 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003435
3436 if (atomic_read(&pm_qos_irq->counter))
3437 return;
3438
3439 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3440 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3441}
3442
/*
 * Take an IRQ PM QoS vote: bump the vote counter and, if the requested
 * latency for the current power policy differs from the active request
 * (or this is the first vote), cancel any pending deferred unvote and
 * update the pm_qos request.
 */
void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *latency =
		&msm_host->pdata->pm_qos_data.irq_latency;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
	/* Make sure to update the voting in case power policy has changed */
	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* Kill any in-flight deferred unvote before re-voting */
	cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3465
/*
 * Drop an IRQ PM QoS vote. When the counter reaches zero the latency
 * request is released: asynchronously via a delayed work item (async)
 * after QOS_REMOVE_DELAY_MS, or immediately otherwise. Warns (and does
 * nothing) on an unbalanced unvote when the counter is already zero.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	} else {
		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
		return;
	}

	/* Other votes still outstanding; keep the request in place */
	if (counter)
		return;

	if (async) {
		/* Defer the release to batch rapid vote/unvote cycles */
		schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3495
Gilad Broner68c54562015-09-20 11:59:46 +03003496static ssize_t
3497sdhci_msm_pm_qos_irq_show(struct device *dev,
3498 struct device_attribute *attr, char *buf)
3499{
3500 struct sdhci_host *host = dev_get_drvdata(dev);
3501 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3502 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3503 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3504
3505 return snprintf(buf, PAGE_SIZE,
3506 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3507 irq->enabled, atomic_read(&irq->counter), irq->latency);
3508}
3509
3510static ssize_t
3511sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3512 struct device_attribute *attr, char *buf)
3513{
3514 struct sdhci_host *host = dev_get_drvdata(dev);
3515 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3516 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3517
3518 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3519}
3520
3521static ssize_t
3522sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3523 struct device_attribute *attr, const char *buf, size_t count)
3524{
3525 struct sdhci_host *host = dev_get_drvdata(dev);
3526 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3527 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3528 uint32_t value;
3529 bool enable;
3530 int ret;
3531
3532 ret = kstrtou32(buf, 0, &value);
3533 if (ret)
3534 goto out;
3535 enable = !!value;
3536
3537 if (enable == msm_host->pm_qos_irq.enabled)
3538 goto out;
3539
3540 msm_host->pm_qos_irq.enabled = enable;
3541 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303542 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003543 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3544 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3545 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3546 msm_host->pm_qos_irq.latency);
3547 }
3548
3549out:
3550 return count;
3551}
3552
#ifdef CONFIG_SMP
/*
 * Bind the PM QoS request to the host controller's interrupt line so the
 * latency vote targets the CPU servicing that IRQ.
 */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	msm_host->pm_qos_irq.req.irq = host->irq;
}
#else
/* On UP kernels pm_qos_request has no irq field; nothing to configure. */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host) { }
#endif
3563
/*
 * One-time setup of IRQ PM QoS voting: configure the request's CPU/IRQ
 * affinity from platform data, register the pm_qos request with the
 * performance-mode latency, and expose the "pm_qos_irq_enable" and
 * "pm_qos_irq_status" sysfs attributes. Safe to call repeatedly; it
 * returns early once enabled.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
			msm_host->pdata->pm_qos_data.irq_req_type;
	/*
	 * IRQ-affine requests track host->irq; core-affine requests copy
	 * the configured CPU mask instead.
	 */
	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
		set_affine_irq(msm_host, host);
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3623
3624static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3625 struct device_attribute *attr, char *buf)
3626{
3627 struct sdhci_host *host = dev_get_drvdata(dev);
3628 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3629 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3630 struct sdhci_msm_pm_qos_group *group;
3631 int i;
3632 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3633 int offset = 0;
3634
3635 for (i = 0; i < nr_groups; i++) {
3636 group = &msm_host->pm_qos[i];
3637 offset += snprintf(&buf[offset], PAGE_SIZE,
3638 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3639 i, group->req.cpus_affine.bits[0],
3640 msm_host->pm_qos_group_enable,
3641 atomic_read(&group->counter),
3642 group->latency);
3643 }
3644
3645 return offset;
3646}
3647
3648static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3649 struct device_attribute *attr, char *buf)
3650{
3651 struct sdhci_host *host = dev_get_drvdata(dev);
3652 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3653 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3654
3655 return snprintf(buf, PAGE_SIZE, "%s\n",
3656 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3657}
3658
3659static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3660 struct device_attribute *attr, const char *buf, size_t count)
3661{
3662 struct sdhci_host *host = dev_get_drvdata(dev);
3663 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3664 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3665 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3666 uint32_t value;
3667 bool enable;
3668 int ret;
3669 int i;
3670
3671 ret = kstrtou32(buf, 0, &value);
3672 if (ret)
3673 goto out;
3674 enable = !!value;
3675
3676 if (enable == msm_host->pm_qos_group_enable)
3677 goto out;
3678
3679 msm_host->pm_qos_group_enable = enable;
3680 if (!enable) {
3681 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303682 cancel_delayed_work_sync(
3683 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003684 atomic_set(&msm_host->pm_qos[i].counter, 0);
3685 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3686 pm_qos_update_request(&msm_host->pm_qos[i].req,
3687 msm_host->pm_qos[i].latency);
3688 }
3689 }
3690
3691out:
3692 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003693}
3694
3695static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3696{
3697 int i;
3698 struct sdhci_msm_cpu_group_map *map =
3699 &msm_host->pdata->pm_qos_data.cpu_group_map;
3700
3701 if (cpu < 0)
3702 goto not_found;
3703
3704 for (i = 0; i < map->nr_groups; i++)
3705 if (cpumask_test_cpu(cpu, &map->mask[i]))
3706 return i;
3707
3708not_found:
3709 return -EINVAL;
3710}
3711
/*
 * Take a PM QoS vote for the group containing @cpu: bump that group's
 * counter and, if the requested latency for the current power policy
 * differs from the active request (or this is the first vote), cancel any
 * pending deferred unvote and update the group's pm_qos request.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* Kill any in-flight deferred unvote before re-voting */
	cancel_delayed_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3737
3738static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3739{
3740 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303741 container_of(work, struct sdhci_msm_pm_qos_group,
3742 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003743
3744 if (atomic_read(&group->counter))
3745 return;
3746
3747 group->latency = PM_QOS_DEFAULT_VALUE;
3748 pm_qos_update_request(&group->req, group->latency);
3749}
3750
/*
 * Drop a PM QoS vote for the group containing @cpu. When this was the
 * last vote, release the group's latency request — deferred via delayed
 * work (async) or immediately — and return true. Returns false when the
 * feature is disabled, the cpu maps to no group, or votes remain.
 *
 * Note the short-circuit: the counter is only decremented when group
 * voting is enabled and the group is valid, mirroring the vote path.
 */
bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);

	if (!msm_host->pm_qos_group_enable || group < 0 ||
		atomic_dec_return(&msm_host->pm_qos[group].counter))
		return false;

	if (async) {
		/* Defer the release to batch rapid vote/unvote cycles */
		schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
				msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return true;
	}

	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos[group].req,
				msm_host->pm_qos[group].latency);
	return true;
}
3772
3773void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3774 struct sdhci_msm_pm_qos_latency *latency)
3775{
3776 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3777 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3778 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3779 struct sdhci_msm_pm_qos_group *group;
3780 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003781 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003782
3783 if (msm_host->pm_qos_group_enable)
3784 return;
3785
3786 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3787 GFP_KERNEL);
3788 if (!msm_host->pm_qos)
3789 return;
3790
3791 for (i = 0; i < nr_groups; i++) {
3792 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303793 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003794 sdhci_msm_pm_qos_cpu_unvote_work);
3795 atomic_set(&group->counter, 0);
3796 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3797 cpumask_copy(&group->req.cpus_affine,
3798 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3799 /* For initialization phase, set the performance mode latency */
3800 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3801 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3802 group->latency);
3803 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3804 __func__, i,
3805 group->req.cpus_affine.bits[0],
3806 group->latency,
3807 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3808 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003809 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003810 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003811
3812 /* sysfs */
3813 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3814 msm_host->pm_qos_group_status_attr.store = NULL;
3815 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3816 msm_host->pm_qos_group_status_attr.attr.name =
3817 "pm_qos_cpu_groups_status";
3818 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3819 ret = device_create_file(&msm_host->pdev->dev,
3820 &msm_host->pm_qos_group_status_attr);
3821 if (ret)
3822 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3823 __func__, ret);
3824 msm_host->pm_qos_group_enable_attr.show =
3825 sdhci_msm_pm_qos_group_enable_show;
3826 msm_host->pm_qos_group_enable_attr.store =
3827 sdhci_msm_pm_qos_group_enable_store;
3828 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3829 msm_host->pm_qos_group_enable_attr.attr.name =
3830 "pm_qos_cpu_groups_enable";
3831 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3832 ret = device_create_file(&msm_host->pdev->dev,
3833 &msm_host->pm_qos_group_enable_attr);
3834 if (ret)
3835 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3836 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003837}
3838
Gilad Broner07d92eb2015-09-29 16:57:21 +03003839static void sdhci_msm_pre_req(struct sdhci_host *host,
3840 struct mmc_request *mmc_req)
3841{
3842 int cpu;
3843 int group;
3844 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3845 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3846 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3847 msm_host->pm_qos_prev_cpu);
3848
3849 sdhci_msm_pm_qos_irq_vote(host);
3850
3851 cpu = get_cpu();
3852 put_cpu();
3853 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3854 if (group < 0)
3855 return;
3856
3857 if (group != prev_group && prev_group >= 0) {
3858 sdhci_msm_pm_qos_cpu_unvote(host,
3859 msm_host->pm_qos_prev_cpu, false);
3860 prev_group = -1; /* make sure to vote for new group */
3861 }
3862
3863 if (prev_group < 0) {
3864 sdhci_msm_pm_qos_cpu_vote(host,
3865 msm_host->pdata->pm_qos_data.latency, cpu);
3866 msm_host->pm_qos_prev_cpu = cpu;
3867 }
3868}
3869
3870static void sdhci_msm_post_req(struct sdhci_host *host,
3871 struct mmc_request *mmc_req)
3872{
3873 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3874 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3875
3876 sdhci_msm_pm_qos_irq_unvote(host, false);
3877
3878 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3879 msm_host->pm_qos_prev_cpu = -1;
3880}
3881
3882static void sdhci_msm_init(struct sdhci_host *host)
3883{
3884 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3885 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3886
3887 sdhci_msm_pm_qos_irq_init(host);
3888
3889 if (msm_host->pdata->pm_qos_data.legacy_valid)
3890 sdhci_msm_pm_qos_cpu_init(host,
3891 msm_host->pdata->pm_qos_data.latency);
3892}
3893
Sahitya Tummala9150a942014-10-31 15:33:04 +05303894static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
3895{
3896 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3897 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3898 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
3899 u32 max_curr = 0;
3900
3901 if (curr_slot && curr_slot->vdd_data)
3902 max_curr = curr_slot->vdd_data->hpm_uA;
3903
3904 return max_curr;
3905}
3906
/*
 * MSM platform hooks plugged into the sdhci core. Mixes vendor-specific
 * implementations (tuning, clocking, QoS pre/post request, register
 * dumps) with stock sdhci helpers (set_bus_width, reset).
 */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.reset_workaround = sdhci_msm_reset_workaround,
	/* QoS voting: init once, vote in pre_req, unvote in post_req */
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
	.get_current_limit = sdhci_msm_get_current_limit,
};
3930
/*
 * sdhci_set_default_hw_caps - fix up the advertised host capabilities
 * based on the SDCC core version and platform data.
 *
 * Reads the core major/minor version from CORE_MCI_VERSION, then patches
 * the capability bits (3.0V/1.8V/8-bit, 64-bit system bus) and sets the
 * per-version quirk/feature flags on @msm_host (CDCLP533 vs CM DLL,
 * updated DLL reset, enhanced strobe, 14lpp DLL, RCLK delay fix,
 * ICE HCI support).  The final capability value is written into the
 * vendor-specific capabilities override register and cached in
 * msm_host->caps_0.
 *
 * All register accesses use the relaxed MMIO accessors; callers are
 * expected to provide any ordering guarantees needed (probe-time only).
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Decode the SDCC core version register into major/minor fields. */
	version = sdhci_msm_readl_relaxed(host,
		msm_host_offset->CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	/* Start from whatever the standard capability register advertises. */
	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		/* Voltage thresholds are in microvolts (2.7V / 1.95V). */
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->pdata->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		/*
		 * NOTE(review): this intermediate write ORs the raw
		 * SDHCI_CAPABILITIES value with the accumulated caps; the
		 * final unconditional write below overwrites it with caps
		 * alone — presumably intentional, confirm against HW docs.
		 */
			writel_relaxed((readl_relaxed(host->ioaddr +
			SDHCI_CAPABILITIES) | caps), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	/* Publish the final capability override to the vendor register. */
	writel_relaxed(caps, host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;

	/* Cores >= 1.0x6b expose the ICE (Inline Crypto Engine) HCI. */
	if ((major == 1) && (minor >= 0x6b))
		msm_host->ice_hci_support = true;
}
4034
#ifdef CONFIG_MMC_CQ_HCI
/*
 * sdhci_msm_cmdq_init - attach the command-queue (CMDQ) engine, if enabled.
 *
 * Honors the "nocmdq" module/cmdline override.  On successful platform
 * init the MMC_CAP2_CMD_QUEUE capability is advertised; on failure the
 * host silently falls back to legacy (non-CMDQ) operation with
 * host->cq_host cleared.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/* User asked for CMDQ to be kept off — nothing to set up. */
	if (nocmdq) {
		dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
		return;
	}

	host->cq_host = cmdq_pltfm_init(pdev);
	if (IS_ERR(host->cq_host)) {
		/* Fall back to legacy mode; log the reason at debug level. */
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
		host->cq_host = NULL;
		return;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
}
#else
/* Stub used when CMDQ support is compiled out. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
}
#endif
4063
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004064static bool sdhci_msm_is_bootdevice(struct device *dev)
4065{
4066 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4067 strlen(saved_command_line))) {
4068 char search_string[50];
4069
4070 snprintf(search_string, ARRAY_SIZE(search_string),
4071 "androidboot.bootdevice=%s", dev_name(dev));
4072 if (strnstr(saved_command_line, search_string,
4073 strlen(saved_command_line)))
4074 return true;
4075 else
4076 return false;
4077 }
4078
4079 /*
4080 * "androidboot.bootdevice=" argument is not present then
4081 * return true as we don't know the boot device anyways.
4082 */
4083 return true;
4084}
4085
Asutosh Das0ef24812012-12-18 16:14:02 +05304086static int sdhci_msm_probe(struct platform_device *pdev)
4087{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304088 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304089 struct sdhci_host *host;
4090 struct sdhci_pltfm_host *pltfm_host;
4091 struct sdhci_msm_host *msm_host;
4092 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004093 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004094 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004095 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304096 struct resource *tlmm_memres = NULL;
4097 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304098 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304099
4100 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4101 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4102 GFP_KERNEL);
4103 if (!msm_host) {
4104 ret = -ENOMEM;
4105 goto out;
4106 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304107
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304108 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4109 msm_host->mci_removed = true;
4110 msm_host->offset = &sdhci_msm_offset_mci_removed;
4111 } else {
4112 msm_host->mci_removed = false;
4113 msm_host->offset = &sdhci_msm_offset_mci_present;
4114 }
4115 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304116 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4117 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4118 if (IS_ERR(host)) {
4119 ret = PTR_ERR(host);
4120 goto out;
4121 }
4122
4123 pltfm_host = sdhci_priv(host);
4124 pltfm_host->priv = msm_host;
4125 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304126 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304127
4128 /* Extract platform data */
4129 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004130 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304131 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004132 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4133 ret);
4134 goto pltfm_free;
4135 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004136
4137 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004138 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4139 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004140 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004141 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004142
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004143 if (disable_slots & (1 << (ret - 1))) {
4144 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4145 ret);
4146 ret = -ENODEV;
4147 goto pltfm_free;
4148 }
4149
Sayali Lokhande5f768322016-04-11 18:36:53 +05304150 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004151 sdhci_slot[ret-1] = msm_host;
4152
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004153 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4154 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304155 if (!msm_host->pdata) {
4156 dev_err(&pdev->dev, "DT parsing error\n");
4157 goto pltfm_free;
4158 }
4159 } else {
4160 dev_err(&pdev->dev, "No device tree node\n");
4161 goto pltfm_free;
4162 }
4163
4164 /* Setup Clocks */
4165
4166 /* Setup SDCC bus voter clock. */
4167 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4168 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4169 /* Vote for max. clk rate for max. performance */
4170 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4171 if (ret)
4172 goto pltfm_free;
4173 ret = clk_prepare_enable(msm_host->bus_clk);
4174 if (ret)
4175 goto pltfm_free;
4176 }
4177
4178 /* Setup main peripheral bus clock */
4179 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4180 if (!IS_ERR(msm_host->pclk)) {
4181 ret = clk_prepare_enable(msm_host->pclk);
4182 if (ret)
4183 goto bus_clk_disable;
4184 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304185 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304186
4187 /* Setup SDC MMC clock */
4188 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4189 if (IS_ERR(msm_host->clk)) {
4190 ret = PTR_ERR(msm_host->clk);
4191 goto pclk_disable;
4192 }
4193
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304194 /* Set to the minimum supported clock frequency */
4195 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4196 if (ret) {
4197 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304198 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304199 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304200 ret = clk_prepare_enable(msm_host->clk);
4201 if (ret)
4202 goto pclk_disable;
4203
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304204 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304205 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304206
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004207 /* Setup CDC calibration fixed feedback clock */
4208 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4209 if (!IS_ERR(msm_host->ff_clk)) {
4210 ret = clk_prepare_enable(msm_host->ff_clk);
4211 if (ret)
4212 goto clk_disable;
4213 }
4214
4215 /* Setup CDC calibration sleep clock */
4216 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4217 if (!IS_ERR(msm_host->sleep_clk)) {
4218 ret = clk_prepare_enable(msm_host->sleep_clk);
4219 if (ret)
4220 goto ff_clk_disable;
4221 }
4222
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004223 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4224
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304225 ret = sdhci_msm_bus_register(msm_host, pdev);
4226 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004227 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304228
4229 if (msm_host->msm_bus_vote.client_handle)
4230 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4231 sdhci_msm_bus_work);
4232 sdhci_msm_bus_voting(host, 1);
4233
Asutosh Das0ef24812012-12-18 16:14:02 +05304234 /* Setup regulators */
4235 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4236 if (ret) {
4237 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304238 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304239 }
4240
4241 /* Reset the core and Enable SDHC mode */
4242 core_memres = platform_get_resource_byname(pdev,
4243 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304244 if (!msm_host->mci_removed) {
4245 if (!core_memres) {
4246 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4247 goto vreg_deinit;
4248 }
4249 msm_host->core_mem = devm_ioremap(&pdev->dev,
4250 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304251
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304252 if (!msm_host->core_mem) {
4253 dev_err(&pdev->dev, "Failed to remap registers\n");
4254 ret = -ENOMEM;
4255 goto vreg_deinit;
4256 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304257 }
4258
Sahitya Tummala079ed852015-10-29 20:18:45 +05304259 tlmm_memres = platform_get_resource_byname(pdev,
4260 IORESOURCE_MEM, "tlmm_mem");
4261 if (tlmm_memres) {
4262 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4263 resource_size(tlmm_memres));
4264
4265 if (!tlmm_mem) {
4266 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4267 ret = -ENOMEM;
4268 goto vreg_deinit;
4269 }
4270 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4271 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4272 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4273 }
4274
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304275 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004276 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304277 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004278 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304279 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304280
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304281 if (!msm_host->mci_removed) {
4282 /* Set HC_MODE_EN bit in HC_MODE register */
4283 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304284
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304285 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4286 writel_relaxed(readl_relaxed(msm_host->core_mem +
4287 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4288 msm_host->core_mem + CORE_HC_MODE);
4289 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304290 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004291
4292 /*
4293 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
4294 * be used as required later on.
4295 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304296 writel_relaxed((readl_relaxed(host->ioaddr +
4297 msm_host_offset->CORE_VENDOR_SPEC) |
4298 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4299 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304300 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304301 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4302 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4303 * interrupt in GIC (by registering the interrupt handler), we need to
4304 * ensure that any pending power irq interrupt status is acknowledged
4305 * otherwise power irq interrupt handler would be fired prematurely.
4306 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304307 irq_status = sdhci_msm_readl_relaxed(host,
4308 msm_host_offset->CORE_PWRCTL_STATUS);
4309 sdhci_msm_writel_relaxed(irq_status, host,
4310 msm_host_offset->CORE_PWRCTL_CLEAR);
4311 irq_ctl = sdhci_msm_readl_relaxed(host,
4312 msm_host_offset->CORE_PWRCTL_CTL);
4313
Subhash Jadavani28137342013-05-14 17:46:43 +05304314 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4315 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4316 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4317 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304318 sdhci_msm_writel_relaxed(irq_ctl, host,
4319 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004320
Subhash Jadavani28137342013-05-14 17:46:43 +05304321 /*
4322 * Ensure that above writes are propogated before interrupt enablement
4323 * in GIC.
4324 */
4325 mb();
4326
4327 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304328 * Following are the deviations from SDHC spec v3.0 -
4329 * 1. Card detection is handled using separate GPIO.
4330 * 2. Bus power control is handled by interacting with PMIC.
4331 */
4332 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4333 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304334 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004335 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304336 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304337 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304338 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304339 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304340 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304341 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304342
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304343 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4344 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4345
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004346 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004347 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4348 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4349 SDHCI_VENDOR_VER_SHIFT));
4350 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4351 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4352 /*
4353 * Add 40us delay in interrupt handler when
4354 * operating at initialization frequency(400KHz).
4355 */
4356 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4357 /*
4358 * Set Software Reset for DAT line in Software
4359 * Reset Register (Bit 2).
4360 */
4361 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4362 }
4363
Asutosh Das214b9662013-06-13 14:27:42 +05304364 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4365
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004366 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004367 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4368 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304369 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004370 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304371 goto vreg_deinit;
4372 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004373 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304374 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004375 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304376 if (ret) {
4377 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004378 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304379 goto vreg_deinit;
4380 }
4381
4382 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304383 sdhci_msm_writel_relaxed(INT_MASK, host,
4384 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304385
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304386#ifdef CONFIG_MMC_CLKGATE
4387 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4388 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4389#endif
4390
Asutosh Das0ef24812012-12-18 16:14:02 +05304391 /* Set host capabilities */
4392 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4393 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004394 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304395 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304396 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004397 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004398 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004399 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304400 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004401 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004402 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304403 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304404
4405 if (msm_host->pdata->nonremovable)
4406 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4407
Guoping Yuf7c91332014-08-20 16:56:18 +08004408 if (msm_host->pdata->nonhotplug)
4409 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4410
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304411 init_completion(&msm_host->pwr_irq_completion);
4412
Sahitya Tummala581df132013-03-12 14:57:46 +05304413 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304414 /*
4415 * Set up the card detect GPIO in active configuration before
4416 * configuring it as an IRQ. Otherwise, it can be in some
4417 * weird/inconsistent state resulting in flood of interrupts.
4418 */
4419 sdhci_msm_setup_pins(msm_host->pdata, true);
4420
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304421 /*
4422 * This delay is needed for stabilizing the card detect GPIO
4423 * line after changing the pull configs.
4424 */
4425 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304426 ret = mmc_gpio_request_cd(msm_host->mmc,
4427 msm_host->pdata->status_gpio, 0);
4428 if (ret) {
4429 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4430 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304431 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304432 }
4433 }
4434
Krishna Konda7feab352013-09-17 23:55:40 -07004435 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4436 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4437 host->dma_mask = DMA_BIT_MASK(64);
4438 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304439 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004440 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304441 host->dma_mask = DMA_BIT_MASK(32);
4442 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304443 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304444 } else {
4445 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4446 }
4447
Ritesh Harjani42876f42015-11-17 17:46:51 +05304448 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4449 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304450 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304451 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4452 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304453 msm_host->is_sdiowakeup_enabled = true;
4454 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4455 sdhci_msm_sdiowakeup_irq,
4456 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4457 "sdhci-msm sdiowakeup", host);
4458 if (ret) {
4459 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4460 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4461 msm_host->pdata->sdiowakeup_irq = -1;
4462 msm_host->is_sdiowakeup_enabled = false;
4463 goto vreg_deinit;
4464 } else {
4465 spin_lock_irqsave(&host->lock, flags);
4466 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304467 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304468 spin_unlock_irqrestore(&host->lock, flags);
4469 }
4470 }
4471
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004472 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304473 ret = sdhci_add_host(host);
4474 if (ret) {
4475 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304476 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304477 }
4478
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004479 pm_runtime_set_active(&pdev->dev);
4480 pm_runtime_enable(&pdev->dev);
4481 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4482 pm_runtime_use_autosuspend(&pdev->dev);
4483
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304484 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4485 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4486 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4487 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4488 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4489 ret = device_create_file(&pdev->dev,
4490 &msm_host->msm_bus_vote.max_bus_bw);
4491 if (ret)
4492 goto remove_host;
4493
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304494 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4495 msm_host->polling.show = show_polling;
4496 msm_host->polling.store = store_polling;
4497 sysfs_attr_init(&msm_host->polling.attr);
4498 msm_host->polling.attr.name = "polling";
4499 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4500 ret = device_create_file(&pdev->dev, &msm_host->polling);
4501 if (ret)
4502 goto remove_max_bus_bw_file;
4503 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304504
4505 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4506 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4507 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4508 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4509 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4510 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4511 if (ret) {
4512 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4513 mmc_hostname(host->mmc), __func__, ret);
4514 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4515 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304516 /* Successful initialization */
4517 goto out;
4518
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304519remove_max_bus_bw_file:
4520 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304521remove_host:
4522 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004523 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304524 sdhci_remove_host(host, dead);
4525vreg_deinit:
4526 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304527bus_unregister:
4528 if (msm_host->msm_bus_vote.client_handle)
4529 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4530 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004531sleep_clk_disable:
4532 if (!IS_ERR(msm_host->sleep_clk))
4533 clk_disable_unprepare(msm_host->sleep_clk);
4534ff_clk_disable:
4535 if (!IS_ERR(msm_host->ff_clk))
4536 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304537clk_disable:
4538 if (!IS_ERR(msm_host->clk))
4539 clk_disable_unprepare(msm_host->clk);
4540pclk_disable:
4541 if (!IS_ERR(msm_host->pclk))
4542 clk_disable_unprepare(msm_host->pclk);
4543bus_clk_disable:
4544 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4545 clk_disable_unprepare(msm_host->bus_clk);
4546pltfm_free:
4547 sdhci_pltfm_free(pdev);
4548out:
4549 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4550 return ret;
4551}
4552
/*
 * sdhci_msm_remove - platform remove: tear down what probe set up.
 *
 * Teardown order is significant and roughly mirrors probe in reverse:
 * sysfs attributes, runtime PM, SDHCI host removal, platform-layer
 * free, regulator deinit, pinctrl teardown, and finally the bus-vote
 * client.  @dead is computed from SDHCI_INT_STATUS reading as all-ones,
 * which tells sdhci_remove_host() the controller is unreachable.
 *
 * NOTE(review): sdhci_pltfm_free() is called before the later uses of
 * @host/@msm_host (vreg deinit, bus unregister) — presumably safe
 * because msm_host is devm-allocated against pdev, but confirm that
 * sdhci_pltfm_free() does not release the sdhci_host this early.
 *
 * Always returns 0.
 */
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
	/* All-ones read of the interrupt status => controller is gone. */
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
			0xffffffff);

	pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
	/* The "polling" attribute only exists when there is no CD GPIO. */
	if (!gpio_is_valid(msm_host->pdata->status_gpio))
		device_remove_file(&pdev->dev, &msm_host->polling);
	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
	pm_runtime_disable(&pdev->dev);
	sdhci_remove_host(host, dead);
	sdhci_pltfm_free(pdev);

	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);

	/* Switch pins to active, then to sleep/teardown configuration. */
	sdhci_msm_setup_pins(pdata, true);
	sdhci_msm_setup_pins(pdata, false);

	if (msm_host->msm_bus_vote.client_handle) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		sdhci_msm_bus_unregister(msm_host);
	}
	return 0;
}
4581
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004582#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304583static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4584{
4585 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4586 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4587 unsigned long flags;
4588 int ret = 0;
4589
4590 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4591 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4592 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304593 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304594 return 1;
4595 }
4596
4597 spin_lock_irqsave(&host->lock, flags);
4598 if (enable) {
4599 /* configure DAT1 gpio if applicable */
4600 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304601 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304602 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4603 if (!ret)
4604 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4605 goto out;
4606 } else {
4607 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4608 mmc_hostname(host->mmc), enable);
4609 }
4610 } else {
4611 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4612 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4613 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304614 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304615 } else {
4616 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
4617 mmc_hostname(host->mmc), enable);
4618
4619 }
4620 }
4621out:
4622 if (ret)
4623 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4624 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4625 ret, msm_host->pdata->sdiowakeup_irq);
4626 spin_unlock_irqrestore(&host->lock, flags);
4627 return ret;
4628}
4629
4630
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004631static int sdhci_msm_runtime_suspend(struct device *dev)
4632{
4633 struct sdhci_host *host = dev_get_drvdata(dev);
4634 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4635 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004636 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004637
Ritesh Harjani42876f42015-11-17 17:46:51 +05304638 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4639 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304640
Ritesh Harjani42876f42015-11-17 17:46:51 +05304641 sdhci_cfg_irq(host, false, true);
4642
4643defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004644 disable_irq(msm_host->pwr_irq);
4645
4646 /*
4647 * Remove the vote immediately only if clocks are off in which
4648 * case we might have queued work to remove vote but it may not
4649 * be completed before runtime suspend or system suspend.
4650 */
4651 if (!atomic_read(&msm_host->clks_on)) {
4652 if (msm_host->msm_bus_vote.client_handle)
4653 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4654 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004655 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4656 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004657
4658 return 0;
4659}
4660
4661static int sdhci_msm_runtime_resume(struct device *dev)
4662{
4663 struct sdhci_host *host = dev_get_drvdata(dev);
4664 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4665 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004666 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004667
Ritesh Harjani42876f42015-11-17 17:46:51 +05304668 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4669 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304670
Ritesh Harjani42876f42015-11-17 17:46:51 +05304671 sdhci_cfg_irq(host, true, true);
4672
4673defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004674 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004675
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004676 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4677 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004678 return 0;
4679}
4680
4681static int sdhci_msm_suspend(struct device *dev)
4682{
4683 struct sdhci_host *host = dev_get_drvdata(dev);
4684 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4685 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004686 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304687 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004688 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004689
4690 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4691 (msm_host->mmc->slot.cd_irq >= 0))
4692 disable_irq(msm_host->mmc->slot.cd_irq);
4693
4694 if (pm_runtime_suspended(dev)) {
4695 pr_debug("%s: %s: already runtime suspended\n",
4696 mmc_hostname(host->mmc), __func__);
4697 goto out;
4698 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004699 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004700out:
Sayali Lokhandeb30295162016-11-18 16:05:50 +05304701 sdhci_msm_disable_controller_clock(host);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304702 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4703 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4704 if (sdio_cfg)
4705 sdhci_cfg_irq(host, false, true);
4706 }
4707
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004708 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4709 ktime_to_us(ktime_sub(ktime_get(), start)));
4710 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004711}
4712
4713static int sdhci_msm_resume(struct device *dev)
4714{
4715 struct sdhci_host *host = dev_get_drvdata(dev);
4716 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4717 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4718 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304719 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004720 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004721
4722 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4723 (msm_host->mmc->slot.cd_irq >= 0))
4724 enable_irq(msm_host->mmc->slot.cd_irq);
4725
4726 if (pm_runtime_suspended(dev)) {
4727 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4728 mmc_hostname(host->mmc), __func__);
4729 goto out;
4730 }
4731
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004732 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004733out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304734 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4735 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4736 if (sdio_cfg)
4737 sdhci_cfg_irq(host, true, true);
4738 }
4739
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004740 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4741 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004742 return ret;
4743}
4744
Ritesh Harjani42876f42015-11-17 17:46:51 +05304745static int sdhci_msm_suspend_noirq(struct device *dev)
4746{
4747 struct sdhci_host *host = dev_get_drvdata(dev);
4748 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4749 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4750 int ret = 0;
4751
4752 /*
4753 * ksdioirqd may be running, hence retry
4754 * suspend in case the clocks are ON
4755 */
4756 if (atomic_read(&msm_host->clks_on)) {
4757 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4758 mmc_hostname(host->mmc), __func__);
4759 ret = -EAGAIN;
4760 }
4761
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304762 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4763 if (msm_host->sdio_pending_processing)
4764 ret = -EBUSY;
4765
Ritesh Harjani42876f42015-11-17 17:46:51 +05304766 return ret;
4767}
4768
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004769static const struct dev_pm_ops sdhci_msm_pmops = {
4770 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
4771 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
4772 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05304773 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004774};
4775
4776#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
4777
4778#else
4779#define SDHCI_MSM_PMOPS NULL
4780#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05304781static const struct of_device_id sdhci_msm_dt_match[] = {
4782 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304783 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07004784 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05304785};
4786MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4787
/* Platform-driver registration; PM ops are NULL when CONFIG_PM is unset */
static struct platform_driver sdhci_msm_driver = {
	.probe		= sdhci_msm_probe,
	.remove		= sdhci_msm_remove,
	.driver		= {
		.name	= "sdhci_msm",
		.owner	= THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		.pm	= SDHCI_MSM_PMOPS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");