/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define SDHCI_VER_100		0x2B

#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)
#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_LOCK		(1 << 7)
#define CORE_DDR_DLL_LOCK	(1 << 11)

#define CORE_CLK_PWRSAVE		(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		(1 << 18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
#define HC_SW_RST_REQ		(1 << 21)
#define CORE_ONE_MID_EN		(1 << 25)

#define CORE_8_BIT_SUPPORT		(1 << 18)
#define CORE_3_3V_SUPPORT		(1 << 24)
#define CORE_3_0V_SUPPORT		(1 << 25)
#define CORE_1_8V_SUPPORT		(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

#define CORE_DDR_CAL_EN		(1 << 0)
#define CORE_FLL_CYCLE_CNT	(1 << 18)
#define CORE_DLL_CLOCK_DISABLE	(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY		115
#define DDR_CONFIG_2_POR_VAL		0x80040873

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
#define TCXO_FREQ		19200000

#define INVALID_TUNING_PHASE	-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES		16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100

struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};

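/*
 * Register access helpers: on SDCC cores where the legacy MCI register
 * block has been removed (msm_host->mci_removed), these registers live
 * in the SDHCI register space itself (host->ioaddr, offsets from
 * sdhci_msm_offset_mci_removed); older cores keep them in the separate
 * MCI region (msm_host->core_mem, offsets from
 * sdhci_msm_offset_mci_present). The accessors below pick the right
 * base so callers can simply pass msm_host->offset->CORE_xxx offsets.
 */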
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

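/*
 * Standard CMD19/CMD21 tuning block patterns: the 128-byte pattern is
 * selected for 8-bit HS200 tuning, the 64-byte pattern otherwise (see
 * the bus-width check in sdhci_msm_execute_tuning() below).
 */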
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever is in voltage_level (the third argument) of
	 * the sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

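/*
 * Worked example (illustrative): if tuning passes at phases
 * {0, 1, 2, 13, 14, 15}, the rows built below are {0, 1, 2} and
 * {13, 14, 15}. Since both phase 0 and phase 15 pass, the two rows
 * wrap around 15->0 and are merged into the single window
 * {13, 14, 15, 0, 1, 2}; the 3/4 point of that six-phase window
 * (index 3, i.e. phase 0) is returned as the sampling phase.
 */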
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if the next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between two valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If the two valid windows form a cycle, merge them into a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases in both windows must be less
			 * than MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge the two cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

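/*
 * DLL initialization sequence implemented below: hold the DLL in
 * reset/power-down (DLL_RST | DLL_PDN), program MCLK_FREQ for the
 * current clock, release reset/power-down, then set DLL_EN and
 * CK_OUT_EN, and finally poll DLL_STATUS for the DLL_LOCK bit
 * (up to ~50us). PWRSAVE is temporarily disabled so the clock
 * stays on while the DLL locks.
 */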
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		if ((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~(0xFF << 10)) | (mclk_freq << 10)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_CDC_T4_DLY_SEL),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		& ~CORE_CDC_SWITCH_BYPASS_OFF),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		| CORE_CDC_SWITCH_RC_EN),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		& ~CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_HW_AUTOCAL_ENA),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
		host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		| CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogram the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG_2)
		| CORE_DDR_CAL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at least 1us before DATA
	 * starts coming. Controllers with the 14lpp tech DLL cannot
	 * guarantee the above requirement, so PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
			mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

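/*
 * Helper used by the tuning retry path below: issues an MMC SWITCH
 * (CMD6) that rewrites the EXT_CSD HS_TIMING byte with the requested
 * driver strength in the upper nibble while keeping HS200 timing (0x2)
 * in the lower nibble, i.e. val = (drv_type << 4) | 2.
 */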
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
			mmc_hostname(mmc), __func__,
			drv_type);
}

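/*
 * Tuning flow implemented below: for each of the 16 DLL output phases,
 * program the phase, read the tuning block (CMD19/CMD21) and compare it
 * against the expected pattern; collect the passing phases and let
 * msm_find_most_appropriate_phase() pick a point roughly 3/4 of the way
 * into the largest passing window. If every phase passes on an eMMC
 * card, the card's driver strength is varied and tuning repeated until
 * at least one phase fails; the whole sequence is retried up to three
 * times (tuning_seq_cnt) before giving up.
 */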
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * only if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during the tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
					(R1_CURRENT_STATE(sts_cmd.resp[0])
					!= R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * Wait for at least 146 MCLK cycles for
					 * the card to move to the TRANS state.
					 * As the MCLK would be at least 200MHz
					 * during tuning, at most a 0.73us delay
					 * is needed. To be on the safer side,
					 * a 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			}
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then it's a problem. So change the card's
		 * drive type to a different value, if supported, and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
			tuned_phase_cnt);

		/* set drive type to another value; the default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in the delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_gpio_data *curr;
	int i, ret = 0;

	curr = pdata->pin_data->gpio_data;
	for (i = 0; i < curr->size; i++) {
		if (!gpio_is_valid(curr->gpio[i].no)) {
			ret = -EINVAL;
			pr_err("%s: Invalid gpio = %d\n", __func__,
					curr->gpio[i].no);
			goto free_gpios;
		}
		if (enable) {
			ret = gpio_request(curr->gpio[i].no,
					curr->gpio[i].name);
			if (ret) {
				pr_err("%s: gpio_request(%d, %s) failed %d\n",
					__func__, curr->gpio[i].no,
					curr->gpio[i].name, ret);
				goto free_gpios;
			}
			curr->gpio[i].is_enabled = true;
		} else {
			gpio_free(curr->gpio[i].no);
			curr->gpio[i].is_enabled = false;
		}
	}
	return ret;

free_gpios:
	for (i--; i >= 0; i--) {
		gpio_free(curr->gpio[i].no);
		curr->gpio[i].is_enabled = false;
	}
	return ret;
}

Pratibhasagar V9acf2642013-11-21 21:07:21 +05301376static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1377 bool enable)
1378{
1379 int ret = 0;
1380
1381 if (enable)
1382 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1383 pdata->pctrl_data->pins_active);
1384 else
1385 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1386 pdata->pctrl_data->pins_sleep);
1387
1388 if (ret < 0)
1389 pr_err("%s state for pinctrl failed with %d\n",
1390 enable ? "Enabling" : "Disabling", ret);
1391
1392 return ret;
1393}
1394
Asutosh Das0ef24812012-12-18 16:14:02 +05301395static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1396{
1397 int ret = 0;
1398
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301399 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301400 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301401 } else if (pdata->pctrl_data) {
1402 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1403 goto out;
1404 } else if (!pdata->pin_data) {
1405 return 0;
1406 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301407
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301408 if (pdata->pin_data->is_gpio)
1409 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301410out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301411 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301412 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301413
1414 return ret;
1415}
1416
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301417static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1418 u32 **out, int *len, u32 size)
1419{
1420 int ret = 0;
1421 struct device_node *np = dev->of_node;
1422 size_t sz;
1423 u32 *arr = NULL;
1424
1425 if (!of_get_property(np, prop_name, len)) {
1426 ret = -EINVAL;
1427 goto out;
1428 }
1429 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001430 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301431 dev_err(dev, "%s invalid size\n", prop_name);
1432 ret = -EINVAL;
1433 goto out;
1434 }
1435
1436 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1437 if (!arr) {
1438 dev_err(dev, "%s failed allocating memory\n", prop_name);
1439 ret = -ENOMEM;
1440 goto out;
1441 }
1442
1443 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1444 if (ret < 0) {
1445 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1446 goto out;
1447 }
1448 *out = arr;
1449out:
1450 if (ret)
1451 *len = 0;
1452 return ret;
1453}
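
/*
 * Usage sketch for sdhci_msm_dt_get_array() above (illustrative only; the
 * clock values below are hypothetical): a DT property such as
 *
 *     qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000>;
 *
 * would be returned as a devm-allocated u32 array of six elements with *len
 * set to 6; passing a non-zero 'size' additionally caps the number of
 * elements that will be accepted.
 */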
1454
Asutosh Das0ef24812012-12-18 16:14:02 +05301455#define MAX_PROP_SIZE 32
1456static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1457 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1458{
1459 int len, ret = 0;
1460 const __be32 *prop;
1461 char prop_name[MAX_PROP_SIZE];
1462 struct sdhci_msm_reg_data *vreg;
1463 struct device_node *np = dev->of_node;
1464
1465 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1466 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301467 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301468 return ret;
1469 }
1470
1471 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1472 if (!vreg) {
1473 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1474 ret = -ENOMEM;
1475 return ret;
1476 }
1477
1478 vreg->name = vreg_name;
1479
1480 snprintf(prop_name, MAX_PROP_SIZE,
1481 "qcom,%s-always-on", vreg_name);
1482 if (of_get_property(np, prop_name, NULL))
1483 vreg->is_always_on = true;
1484
1485 snprintf(prop_name, MAX_PROP_SIZE,
1486 "qcom,%s-lpm-sup", vreg_name);
1487 if (of_get_property(np, prop_name, NULL))
1488 vreg->lpm_sup = true;
1489
1490 snprintf(prop_name, MAX_PROP_SIZE,
1491 "qcom,%s-voltage-level", vreg_name);
1492 prop = of_get_property(np, prop_name, &len);
1493 if (!prop || (len != (2 * sizeof(__be32)))) {
1494 dev_warn(dev, "%s %s property\n",
1495 prop ? "invalid format" : "no", prop_name);
1496 } else {
1497 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1498 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1499 }
1500
1501 snprintf(prop_name, MAX_PROP_SIZE,
1502 "qcom,%s-current-level", vreg_name);
1503 prop = of_get_property(np, prop_name, &len);
1504 if (!prop || (len != (2 * sizeof(__be32)))) {
1505 dev_warn(dev, "%s %s property\n",
1506 prop ? "invalid format" : "no", prop_name);
1507 } else {
1508 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1509 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1510 }
1511
1512 *vreg_data = vreg;
1513 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1514 vreg->name, vreg->is_always_on ? "always_on," : "",
1515 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1516 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1517
1518 return ret;
1519}
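
/*
 * Example DT fragment for the properties parsed above (illustrative only;
 * the regulator phandle and the numeric levels are hypothetical, the
 * property names follow this parser with vreg_name == "vdd"):
 *
 *     vdd-supply = <&pm_l21>;
 *     qcom,vdd-always-on;
 *     qcom,vdd-lpm-sup;
 *     qcom,vdd-voltage-level = <2950000 2950000>;    <low high> in uV
 *     qcom,vdd-current-level = <200 800000>;         <lpm hpm> in uA
 */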
1520
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301521static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1522 struct sdhci_msm_pltfm_data *pdata)
1523{
1524 struct sdhci_pinctrl_data *pctrl_data;
1525 struct pinctrl *pctrl;
1526 int ret = 0;
1527
1528 /* Try to obtain pinctrl handle */
1529 pctrl = devm_pinctrl_get(dev);
1530 if (IS_ERR(pctrl)) {
1531 ret = PTR_ERR(pctrl);
1532 goto out;
1533 }
1534 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1535 if (!pctrl_data) {
1536 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1537 ret = -ENOMEM;
1538 goto out;
1539 }
1540 pctrl_data->pctrl = pctrl;
1541 /* Look-up and keep the states handy to be used later */
1542 pctrl_data->pins_active = pinctrl_lookup_state(
1543 pctrl_data->pctrl, "active");
1544 if (IS_ERR(pctrl_data->pins_active)) {
1545 ret = PTR_ERR(pctrl_data->pins_active);
1546 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1547 goto out;
1548 }
1549 pctrl_data->pins_sleep = pinctrl_lookup_state(
1550 pctrl_data->pctrl, "sleep");
1551 if (IS_ERR(pctrl_data->pins_sleep)) {
1552 ret = PTR_ERR(pctrl_data->pins_sleep);
1553 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1554 goto out;
1555 }
1556 pdata->pctrl_data = pctrl_data;
1557out:
1558 return ret;
1559}
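
/*
 * Example pinctrl wiring for the "active"/"sleep" states looked up above
 * (illustrative only; the pin-configuration node names are hypothetical):
 *
 *     pinctrl-names = "active", "sleep";
 *     pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
 *     pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
 */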
1560
Asutosh Das0ef24812012-12-18 16:14:02 +05301561#define GPIO_NAME_MAX_LEN 32
1562static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1563 struct sdhci_msm_pltfm_data *pdata)
1564{
1565 int ret = 0, cnt, i;
1566 struct sdhci_msm_pin_data *pin_data;
1567 struct device_node *np = dev->of_node;
1568
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301569 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1570 if (!ret) {
1571 goto out;
1572 } else if (ret == -EPROBE_DEFER) {
1573 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1574 goto out;
1575 } else {
1576 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1577 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301578 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301579 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301580 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1581 if (!pin_data) {
1582 dev_err(dev, "No memory for pin_data\n");
1583 ret = -ENOMEM;
1584 goto out;
1585 }
1586
1587 cnt = of_gpio_count(np);
1588 if (cnt > 0) {
1589 pin_data->gpio_data = devm_kzalloc(dev,
1590 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1591 if (!pin_data->gpio_data) {
1592 dev_err(dev, "No memory for gpio_data\n");
1593 ret = -ENOMEM;
1594 goto out;
1595 }
1596 pin_data->gpio_data->size = cnt;
1597 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1598 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1599
1600 if (!pin_data->gpio_data->gpio) {
1601 dev_err(dev, "No memory for gpio\n");
1602 ret = -ENOMEM;
1603 goto out;
1604 }
1605
1606 for (i = 0; i < cnt; i++) {
1607 const char *name = NULL;
1608 char result[GPIO_NAME_MAX_LEN];
1609 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1610 of_property_read_string_index(np,
1611 "qcom,gpio-names", i, &name);
1612
1613 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1614 dev_name(dev), name ? name : "?");
1615 pin_data->gpio_data->gpio[i].name = result;
1616 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1617 pin_data->gpio_data->gpio[i].name,
1618 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301619 }
1620 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301621 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301622out:
1623 if (ret)
1624 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1625 return ret;
1626}
1627
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001628#ifdef CONFIG_SMP
1629static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1630{
1631 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1632}
1633#else
1634static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1635#endif
1636
Gilad Bronerc788a672015-09-08 15:39:11 +03001637static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1638 struct sdhci_msm_pltfm_data *pdata)
1639{
1640 struct device_node *np = dev->of_node;
1641 const char *str;
1642 u32 cpu;
1643 int ret = 0;
1644 int i;
1645
1646 pdata->pm_qos_data.irq_valid = false;
1647 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1648 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1649 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001650 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001651 }
1652
1653 /* must specify cpu for "affine_cores" type */
1654 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1655 pdata->pm_qos_data.irq_cpu = -1;
1656 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1657 if (ret) {
1658 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1659 ret);
1660 goto out;
1661 }
1662 if (cpu < 0 || cpu >= num_possible_cpus()) {
1663 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1664 __func__, cpu, num_possible_cpus());
1665 ret = -EINVAL;
1666 goto out;
1667 }
1668 pdata->pm_qos_data.irq_cpu = cpu;
1669 }
1670
1671 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1672 SDHCI_POWER_POLICY_NUM) {
1673 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1674 __func__, SDHCI_POWER_POLICY_NUM);
1675 ret = -EINVAL;
1676 goto out;
1677 }
1678
1679 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1680 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1681 &pdata->pm_qos_data.irq_latency.latency[i]);
1682
1683 pdata->pm_qos_data.irq_valid = true;
1684out:
1685 return ret;
1686}
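
/*
 * Example DT fragment for the IRQ PM QoS parsing above (illustrative only;
 * the latency values are hypothetical and the array is assumed to carry
 * SDHCI_POWER_POLICY_NUM entries, one per power policy):
 *
 *     qcom,pm-qos-irq-type = "affine_cores";
 *     qcom,pm-qos-irq-cpu = <0>;
 *     qcom,pm-qos-irq-latency = <500 250>;
 */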
1687
1688static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1689 struct sdhci_msm_pltfm_data *pdata)
1690{
1691 struct device_node *np = dev->of_node;
1692 u32 mask;
1693 int nr_groups;
1694 int ret;
1695 int i;
1696
1697 /* Read cpu group mapping */
1698 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1699 if (nr_groups <= 0) {
1700 ret = -EINVAL;
1701 goto out;
1702 }
1703 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1704 pdata->pm_qos_data.cpu_group_map.mask =
1705 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1706 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1707 ret = -ENOMEM;
1708 goto out;
1709 }
1710
1711 for (i = 0; i < nr_groups; i++) {
1712 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1713 i, &mask);
1714
1715 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1716 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1717 cpu_possible_mask)) {
1718 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1719 __func__, mask, i);
1720 ret = -EINVAL;
1721 goto free_res;
1722 }
1723 }
1724 return 0;
1725
1726free_res:
1727 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1728out:
1729 return ret;
1730}
1731
1732static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1733 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1734{
1735 struct device_node *np = dev->of_node;
1736 struct sdhci_msm_pm_qos_latency *values;
1737 int ret;
1738 int i;
1739 int group;
1740 int cfg;
1741
1742 ret = of_property_count_u32_elems(np, name);
1743 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1744 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1745 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1746 ret);
1747 return -EINVAL;
1748 } else if (ret < 0) {
1749 return ret;
1750 }
1751
1752 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1753 GFP_KERNEL);
1754 if (!values)
1755 return -ENOMEM;
1756
1757 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1758 group = i / SDHCI_POWER_POLICY_NUM;
1759 cfg = i % SDHCI_POWER_POLICY_NUM;
1760 of_property_read_u32_index(np, name, i,
1761 &(values[group].latency[cfg]));
1762 }
1763
1764 *latency = values;
1765 return 0;
1766}
1767
1768static void sdhci_msm_pm_qos_parse(struct device *dev,
1769 struct sdhci_msm_pltfm_data *pdata)
1770{
1771 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1772 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1773 __func__);
1774
1775 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1776 pdata->pm_qos_data.cmdq_valid =
1777 !sdhci_msm_pm_qos_parse_latency(dev,
1778 "qcom,pm-qos-cmdq-latency-us",
1779 pdata->pm_qos_data.cpu_group_map.nr_groups,
1780 &pdata->pm_qos_data.cmdq_latency);
1781 pdata->pm_qos_data.legacy_valid =
1782 !sdhci_msm_pm_qos_parse_latency(dev,
1783 "qcom,pm-qos-legacy-latency-us",
1784 pdata->pm_qos_data.cpu_group_map.nr_groups,
1785 &pdata->pm_qos_data.latency);
1786 if (!pdata->pm_qos_data.cmdq_valid &&
1787 !pdata->pm_qos_data.legacy_valid) {
1788 /* clean-up previously allocated arrays */
1789 kfree(pdata->pm_qos_data.latency);
1790 kfree(pdata->pm_qos_data.cmdq_latency);
1791 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1792 __func__);
1793 }
1794 } else {
1795 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1796 __func__);
1797 }
1798}
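
/*
 * Example DT fragment for the CPU-group PM QoS parsing above (illustrative
 * only; masks and latencies are hypothetical and assume two CPU groups with
 * SDHCI_POWER_POLICY_NUM == 2 latency entries per group):
 *
 *     qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *     qcom,pm-qos-legacy-latency-us = <200 100>, <200 100>;
 *     qcom,pm-qos-cmdq-latency-us = <200 100>, <200 100>;
 */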
1799
Asutosh Das0ef24812012-12-18 16:14:02 +05301800/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001801static
1802struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1803 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301804{
1805 struct sdhci_msm_pltfm_data *pdata = NULL;
1806 struct device_node *np = dev->of_node;
1807 u32 bus_width = 0;
1808 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301809 int clk_table_len;
1810 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301811 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301812
1813 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1814 if (!pdata) {
1815 dev_err(dev, "failed to allocate memory for platform data\n");
1816 goto out;
1817 }
1818
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301819 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1820 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1821 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301822
Asutosh Das0ef24812012-12-18 16:14:02 +05301823 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1824 if (bus_width == 8)
1825 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1826 else if (bus_width == 4)
1827 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1828 else {
1829 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1830 pdata->mmc_bus_width = 0;
1831 }
1832
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001833 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1834 &msm_host->mmc->clk_scaling.freq_table,
1835 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1836 pr_debug("%s: no clock scaling frequencies were supplied\n",
1837 dev_name(dev));
1838 else if (!msm_host->mmc->clk_scaling.freq_table ||
1839 !msm_host->mmc->clk_scaling.freq_table_sz)
1840 dev_err(dev, "bad dts clock scaling frequencies\n");
1841
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301842 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1843 &clk_table, &clk_table_len, 0)) {
1844 dev_err(dev, "failed parsing supported clock rates\n");
1845 goto out;
1846 }
1847 if (!clk_table || !clk_table_len) {
1848 dev_err(dev, "Invalid clock table\n");
1849 goto out;
1850 }
1851 pdata->sup_clk_table = clk_table;
1852 pdata->sup_clk_cnt = clk_table_len;
1853
Asutosh Das0ef24812012-12-18 16:14:02 +05301854 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1855 sdhci_msm_slot_reg_data),
1856 GFP_KERNEL);
1857 if (!pdata->vreg_data) {
1858 dev_err(dev, "failed to allocate memory for vreg data\n");
1859 goto out;
1860 }
1861
1862 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1863 "vdd")) {
1864 dev_err(dev, "failed parsing vdd data\n");
1865 goto out;
1866 }
1867 if (sdhci_msm_dt_parse_vreg_info(dev,
1868 &pdata->vreg_data->vdd_io_data,
1869 "vdd-io")) {
1870 dev_err(dev, "failed parsing vdd-io data\n");
1871 goto out;
1872 }
1873
1874 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1875 dev_err(dev, "failed parsing gpio data\n");
1876 goto out;
1877 }
1878
Asutosh Das0ef24812012-12-18 16:14:02 +05301879 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1880
1881 for (i = 0; i < len; i++) {
1882 const char *name = NULL;
1883
1884 of_property_read_string_index(np,
1885 "qcom,bus-speed-mode", i, &name);
1886 if (!name)
1887 continue;
1888
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001889 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1890 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1891 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1892 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1893 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301894 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1895 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1896 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1897 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1898 pdata->caps |= MMC_CAP_1_8V_DDR
1899 | MMC_CAP_UHS_DDR50;
1900 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1901 pdata->caps |= MMC_CAP_1_2V_DDR
1902 | MMC_CAP_UHS_DDR50;
1903 }
1904
1905 if (of_get_property(np, "qcom,nonremovable", NULL))
1906 pdata->nonremovable = true;
1907
Guoping Yuf7c91332014-08-20 16:56:18 +08001908 if (of_get_property(np, "qcom,nonhotplug", NULL))
1909 pdata->nonhotplug = true;
1910
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001911 pdata->largeaddressbus =
1912 of_property_read_bool(np, "qcom,large-address-bus");
1913
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001914 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1915 msm_host->mmc->wakeup_on_idle = true;
1916
Gilad Bronerc788a672015-09-08 15:39:11 +03001917 sdhci_msm_pm_qos_parse(dev, pdata);
1918
Pavan Anamula5a256df2015-10-16 14:38:28 +05301919 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1920 pdata->core_3_0v_support = true;
1921
Asutosh Das0ef24812012-12-18 16:14:02 +05301922 return pdata;
1923out:
1924 return NULL;
1925}
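
/*
 * Condensed example of a slot node consumed by sdhci_msm_populate_pdata()
 * above (illustrative only; GPIO numbers, clock rates and supply phandles
 * are hypothetical, the property names are the ones parsed in this file):
 *
 *     cd-gpios = <&msmgpio 38 0x1>;
 *     qcom,bus-width = <4>;
 *     qcom,clk-rates = <400000 25000000 50000000 100000000 192000000>;
 *     qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 *     qcom,nonremovable;
 *     vdd-supply = <&pm_l21>;
 *     vdd-io-supply = <&pm_l13>;
 */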
1926
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301927/* Returns required bandwidth in Bytes per Sec */
1928static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1929 struct mmc_ios *ios)
1930{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301931 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1932 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1933
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301934 unsigned int bw;
1935
Sahitya Tummala2886c922013-04-03 18:03:31 +05301936 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301937 /*
1938	 * For DDR mode, the SDCC controller clock runs at twice
1939	 * the rate of the actual clock that goes to the card.
1940 */
1941 if (ios->bus_width == MMC_BUS_WIDTH_4)
1942 bw /= 2;
1943 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1944 bw /= 8;
1945
1946 return bw;
1947}
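
/*
 * Worked example for sdhci_get_bw_required() above (numbers are
 * illustrative): with msm_host->clk_rate = 200000000 the function returns
 * 200000000 bytes/sec for an 8-bit bus, 100000000 for a 4-bit bus and
 * 25000000 for a 1-bit bus.
 */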
1948
1949static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1950 unsigned int bw)
1951{
1952 unsigned int *table = host->pdata->voting_data->bw_vecs;
1953 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1954 int i;
1955
1956 if (host->msm_bus_vote.is_max_bw_needed && bw)
1957 return host->msm_bus_vote.max_bw_vote;
1958
1959 for (i = 0; i < size; i++) {
1960 if (bw <= table[i])
1961 break;
1962 }
1963
1964 if (i && (i == size))
1965 i--;
1966
1967 return i;
1968}
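
/*
 * Worked example for sdhci_msm_bus_get_vote_for_bw() above (the vector
 * table is hypothetical): with bw_vecs = {0, 400000, 25000000, 100000000},
 * a request of 20000000 bytes/sec stops at index 2 (the first entry that
 * is >= the request), while a request above 100000000 runs off the end of
 * the table and is clamped to the last index, 3.
 */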
1969
1970/*
1971 * This function must be called with host lock acquired.
1972 * Caller of this function should also ensure that msm bus client
1973 * handle is not null.
1974 */
1975static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1976 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301977 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301978{
1979 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
1980 int rc = 0;
1981
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301982 BUG_ON(!flags);
1983
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301984 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301985 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301986 rc = msm_bus_scale_client_update_request(
1987 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301988 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301989 if (rc) {
1990 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1991 mmc_hostname(host->mmc),
1992 msm_host->msm_bus_vote.client_handle, vote, rc);
1993 goto out;
1994 }
1995 msm_host->msm_bus_vote.curr_vote = vote;
1996 }
1997out:
1998 return rc;
1999}
2000
2001/*
2002 * Internal work. Work to set 0 bandwidth for msm bus.
2003 */
2004static void sdhci_msm_bus_work(struct work_struct *work)
2005{
2006 struct sdhci_msm_host *msm_host;
2007 struct sdhci_host *host;
2008 unsigned long flags;
2009
2010 msm_host = container_of(work, struct sdhci_msm_host,
2011 msm_bus_vote.vote_work.work);
2012 host = platform_get_drvdata(msm_host->pdev);
2013
2014 if (!msm_host->msm_bus_vote.client_handle)
2015 return;
2016
2017 spin_lock_irqsave(&host->lock, flags);
2018 /* don't vote for 0 bandwidth if any request is in progress */
2019 if (!host->mrq) {
2020 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302021 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302022 } else
2023		pr_warning("%s: %s: Transfer in progress, skipping bus voting to 0 bandwidth\n",
2024 mmc_hostname(host->mmc), __func__);
2025 spin_unlock_irqrestore(&host->lock, flags);
2026}
2027
2028/*
2029 * This function cancels any scheduled delayed work and sets the bus
2030 * vote based on bw (bandwidth) argument.
2031 */
2032static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2033 unsigned int bw)
2034{
2035 int vote;
2036 unsigned long flags;
2037 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2038 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2039
2040 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2041 spin_lock_irqsave(&host->lock, flags);
2042 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302043 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302044 spin_unlock_irqrestore(&host->lock, flags);
2045}
2046
2047#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2048
2049/* This function queues a work item that sets the bandwidth requirement to 0 */
2050static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2051{
2052 unsigned long flags;
2053 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2054 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2055
2056 spin_lock_irqsave(&host->lock, flags);
2057 if (msm_host->msm_bus_vote.min_bw_vote !=
2058 msm_host->msm_bus_vote.curr_vote)
2059 queue_delayed_work(system_wq,
2060 &msm_host->msm_bus_vote.vote_work,
2061 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2062 spin_unlock_irqrestore(&host->lock, flags);
2063}
2064
2065static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2066 struct platform_device *pdev)
2067{
2068 int rc = 0;
2069 struct msm_bus_scale_pdata *bus_pdata;
2070
2071 struct sdhci_msm_bus_voting_data *data;
2072 struct device *dev = &pdev->dev;
2073
2074 data = devm_kzalloc(dev,
2075 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2076 if (!data) {
2077 dev_err(&pdev->dev,
2078 "%s: failed to allocate memory\n", __func__);
2079 rc = -ENOMEM;
2080 goto out;
2081 }
2082 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2083 if (data->bus_pdata) {
2084 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2085 &data->bw_vecs, &data->bw_vecs_size, 0);
2086 if (rc) {
2087 dev_err(&pdev->dev,
2088 "%s: Failed to get bus-bw-vectors-bps\n",
2089 __func__);
2090 goto out;
2091 }
2092 host->pdata->voting_data = data;
2093 }
2094 if (host->pdata->voting_data &&
2095 host->pdata->voting_data->bus_pdata &&
2096 host->pdata->voting_data->bw_vecs &&
2097 host->pdata->voting_data->bw_vecs_size) {
2098
2099 bus_pdata = host->pdata->voting_data->bus_pdata;
2100 host->msm_bus_vote.client_handle =
2101 msm_bus_scale_register_client(bus_pdata);
2102 if (!host->msm_bus_vote.client_handle) {
2103			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2104 rc = -EFAULT;
2105 goto out;
2106 }
2107 /* cache the vote index for minimum and maximum bandwidth */
2108 host->msm_bus_vote.min_bw_vote =
2109 sdhci_msm_bus_get_vote_for_bw(host, 0);
2110 host->msm_bus_vote.max_bw_vote =
2111 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2112 } else {
2113 devm_kfree(dev, data);
2114 }
2115
2116out:
2117 return rc;
2118}
2119
2120static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2121{
2122 if (host->msm_bus_vote.client_handle)
2123 msm_bus_scale_unregister_client(
2124 host->msm_bus_vote.client_handle);
2125}
2126
2127static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2128{
2129 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2130 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2131 struct mmc_ios *ios = &host->mmc->ios;
2132 unsigned int bw;
2133
2134 if (!msm_host->msm_bus_vote.client_handle)
2135 return;
2136
2137 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302138 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302139 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302140 } else {
2141 /*
2142 * If clock gating is enabled, then remove the vote
2143 * immediately because clocks will be disabled only
2144 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2145 * additional delay is required to remove the bus vote.
2146 */
2147#ifdef CONFIG_MMC_CLKGATE
2148 if (host->mmc->clkgate_delay)
2149 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2150 else
2151#endif
2152 sdhci_msm_bus_queue_work(host);
2153 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302154}
2155
Asutosh Das0ef24812012-12-18 16:14:02 +05302156/* Regulator utility functions */
2157static int sdhci_msm_vreg_init_reg(struct device *dev,
2158 struct sdhci_msm_reg_data *vreg)
2159{
2160 int ret = 0;
2161
2162	 /* Check if the regulator is already initialized */
2163 if (vreg->reg)
2164 goto out;
2165
2166 /* Get the regulator handle */
2167 vreg->reg = devm_regulator_get(dev, vreg->name);
2168 if (IS_ERR(vreg->reg)) {
2169 ret = PTR_ERR(vreg->reg);
2170 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2171 __func__, vreg->name, ret);
2172 goto out;
2173 }
2174
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302175 if (regulator_count_voltages(vreg->reg) > 0) {
2176 vreg->set_voltage_sup = true;
2177 /* sanity check */
2178 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2179 pr_err("%s: %s invalid constraints specified\n",
2180 __func__, vreg->name);
2181 ret = -EINVAL;
2182 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302183 }
2184
2185out:
2186 return ret;
2187}
2188
2189static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2190{
2191 if (vreg->reg)
2192 devm_regulator_put(vreg->reg);
2193}
2194
2195static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2196 *vreg, int uA_load)
2197{
2198 int ret = 0;
2199
2200 /*
2201 * regulators that do not support regulator_set_voltage also
2202 * do not support regulator_set_optimum_mode
2203 */
2204 if (vreg->set_voltage_sup) {
2205 ret = regulator_set_load(vreg->reg, uA_load);
2206 if (ret < 0)
2207 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2208 __func__, vreg->name, uA_load, ret);
2209 else
2210 /*
2211			 * regulator_set_load() can return a non-zero
2212			 * value even in the success case.
2213 */
2214 ret = 0;
2215 }
2216 return ret;
2217}
2218
2219static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2220 int min_uV, int max_uV)
2221{
2222 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302223 if (vreg->set_voltage_sup) {
2224 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2225 if (ret) {
2226 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302227 __func__, vreg->name, min_uV, max_uV, ret);
2228 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302229 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302230
2231 return ret;
2232}
2233
2234static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2235{
2236 int ret = 0;
2237
2238 /* Put regulator in HPM (high power mode) */
2239 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2240 if (ret < 0)
2241 return ret;
2242
2243 if (!vreg->is_enabled) {
2244 /* Set voltage level */
2245 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2246 vreg->high_vol_level);
2247 if (ret)
2248 return ret;
2249 }
2250 ret = regulator_enable(vreg->reg);
2251 if (ret) {
2252 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2253 __func__, vreg->name, ret);
2254 return ret;
2255 }
2256 vreg->is_enabled = true;
2257 return ret;
2258}
2259
2260static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2261{
2262 int ret = 0;
2263
2264 /* Never disable regulator marked as always_on */
2265 if (vreg->is_enabled && !vreg->is_always_on) {
2266 ret = regulator_disable(vreg->reg);
2267 if (ret) {
2268 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2269 __func__, vreg->name, ret);
2270 goto out;
2271 }
2272 vreg->is_enabled = false;
2273
2274 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2275 if (ret < 0)
2276 goto out;
2277
2278 /* Set min. voltage level to 0 */
2279 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2280 if (ret)
2281 goto out;
2282 } else if (vreg->is_enabled && vreg->is_always_on) {
2283 if (vreg->lpm_sup) {
2284 /* Put always_on regulator in LPM (low power mode) */
2285 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2286 vreg->lpm_uA);
2287 if (ret < 0)
2288 goto out;
2289 }
2290 }
2291out:
2292 return ret;
2293}
2294
2295static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2296 bool enable, bool is_init)
2297{
2298 int ret = 0, i;
2299 struct sdhci_msm_slot_reg_data *curr_slot;
2300 struct sdhci_msm_reg_data *vreg_table[2];
2301
2302 curr_slot = pdata->vreg_data;
2303 if (!curr_slot) {
2304		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2305 __func__);
2306 goto out;
2307 }
2308
2309 vreg_table[0] = curr_slot->vdd_data;
2310 vreg_table[1] = curr_slot->vdd_io_data;
2311
2312 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2313 if (vreg_table[i]) {
2314 if (enable)
2315 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2316 else
2317 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2318 if (ret)
2319 goto out;
2320 }
2321 }
2322out:
2323 return ret;
2324}
2325
2326/*
2327 * Reset the vreg by ensuring it is off during probe. A call
2328 * to enable the vreg is needed to balance the disable call.
2329 */
2330static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2331{
2332 int ret;
2333
2334 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2335 if (ret)
2336 return ret;
2337 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2338 return ret;
2339}
2340
2341/* This init function should be called only once for each SDHC slot */
2342static int sdhci_msm_vreg_init(struct device *dev,
2343 struct sdhci_msm_pltfm_data *pdata,
2344 bool is_init)
2345{
2346 int ret = 0;
2347 struct sdhci_msm_slot_reg_data *curr_slot;
2348 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2349
2350 curr_slot = pdata->vreg_data;
2351 if (!curr_slot)
2352 goto out;
2353
2354 curr_vdd_reg = curr_slot->vdd_data;
2355 curr_vdd_io_reg = curr_slot->vdd_io_data;
2356
2357 if (!is_init)
2358 /* Deregister all regulators from regulator framework */
2359 goto vdd_io_reg_deinit;
2360
2361 /*
2362 * Get the regulator handle from voltage regulator framework
2363 * and then try to set the voltage level for the regulator
2364 */
2365 if (curr_vdd_reg) {
2366 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2367 if (ret)
2368 goto out;
2369 }
2370 if (curr_vdd_io_reg) {
2371 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2372 if (ret)
2373 goto vdd_reg_deinit;
2374 }
2375 ret = sdhci_msm_vreg_reset(pdata);
2376 if (ret)
2377 dev_err(dev, "vreg reset failed (%d)\n", ret);
2378 goto out;
2379
2380vdd_io_reg_deinit:
2381 if (curr_vdd_io_reg)
2382 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2383vdd_reg_deinit:
2384 if (curr_vdd_reg)
2385 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2386out:
2387 return ret;
2388}
2389
2390
2391static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2392 enum vdd_io_level level,
2393 unsigned int voltage_level)
2394{
2395 int ret = 0;
2396 int set_level;
2397 struct sdhci_msm_reg_data *vdd_io_reg;
2398
2399 if (!pdata->vreg_data)
2400 return ret;
2401
2402 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2403 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2404 switch (level) {
2405 case VDD_IO_LOW:
2406 set_level = vdd_io_reg->low_vol_level;
2407 break;
2408 case VDD_IO_HIGH:
2409 set_level = vdd_io_reg->high_vol_level;
2410 break;
2411 case VDD_IO_SET_LEVEL:
2412 set_level = voltage_level;
2413 break;
2414 default:
2415 pr_err("%s: invalid argument level = %d",
2416 __func__, level);
2417 ret = -EINVAL;
2418 return ret;
2419 }
2420 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2421 set_level);
2422 }
2423 return ret;
2424}
2425
Ritesh Harjani42876f42015-11-17 17:46:51 +05302426/*
2427 * Acquire spin-lock host->lock before calling this function
2428 */
2429static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2430 bool enable)
2431{
2432 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2433 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2434
2435 if (enable && !msm_host->is_sdiowakeup_enabled)
2436 enable_irq(msm_host->pdata->sdiowakeup_irq);
2437 else if (!enable && msm_host->is_sdiowakeup_enabled)
2438 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2439 else
2440		dev_warn(&msm_host->pdev->dev, "%s: SDIO wakeup IRQ already in requested state: requested %d, current %d\n",
2441 __func__, enable, msm_host->is_sdiowakeup_enabled);
2442 msm_host->is_sdiowakeup_enabled = enable;
2443}
2444
2445static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2446{
2447 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302448 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2449 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2450
Ritesh Harjani42876f42015-11-17 17:46:51 +05302451 unsigned long flags;
2452
2453 pr_debug("%s: irq (%d) received\n", __func__, irq);
2454
2455 spin_lock_irqsave(&host->lock, flags);
2456 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2457 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302458 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302459
2460 return IRQ_HANDLED;
2461}
2462
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302463void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2464{
2465 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2466 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302467 const struct sdhci_msm_offset *msm_host_offset =
2468 msm_host->offset;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302469
2470 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2471 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302472 sdhci_msm_readl_relaxed(host,
2473 msm_host_offset->CORE_PWRCTL_STATUS),
2474 sdhci_msm_readl_relaxed(host,
2475 msm_host_offset->CORE_PWRCTL_MASK),
2476 sdhci_msm_readl_relaxed(host,
2477 msm_host_offset->CORE_PWRCTL_CTL));
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302478}
2479
Asutosh Das0ef24812012-12-18 16:14:02 +05302480static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2481{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002482 struct sdhci_host *host = (struct sdhci_host *)data;
2483 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2484 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302485 const struct sdhci_msm_offset *msm_host_offset =
2486 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302487 u8 irq_status = 0;
2488 u8 irq_ack = 0;
2489 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302490 int pwr_state = 0, io_level = 0;
2491 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302492 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302493
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302494 irq_status = sdhci_msm_readb_relaxed(host,
2495 msm_host_offset->CORE_PWRCTL_STATUS);
2496
Asutosh Das0ef24812012-12-18 16:14:02 +05302497 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2498 mmc_hostname(msm_host->mmc), irq, irq_status);
2499
2500 /* Clear the interrupt */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302501 sdhci_msm_writeb_relaxed(irq_status, host,
2502 msm_host_offset->CORE_PWRCTL_CLEAR);
2503
Asutosh Das0ef24812012-12-18 16:14:02 +05302504 /*
2505 * SDHC has core_mem and hc_mem device memory and these memory
2506 * addresses do not fall within 1KB region. Hence, any update to
2507 * core_mem address space would require an mb() to ensure this gets
2508 * completed before its next update to registers within hc_mem.
2509 */
2510 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302511 /*
2512 * There is a rare HW scenario where the first clear pulse could be
2513	 * lost when the actual reset and the clear/read of the status register
2514	 * happen at the same time. Hence, retry up to 10 times to make
2515	 * sure the status register is cleared. Otherwise, this will result in
2516 * a spurious power IRQ resulting in system instability.
2517 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302518 while (irq_status & sdhci_msm_readb_relaxed(host,
2519 msm_host_offset->CORE_PWRCTL_STATUS)) {
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302520 if (retry == 0) {
2521			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
2522 mmc_hostname(host->mmc), irq_status);
2523 sdhci_msm_dump_pwr_ctrl_regs(host);
2524 BUG_ON(1);
2525 }
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302526 sdhci_msm_writeb_relaxed(irq_status, host,
2527 msm_host_offset->CORE_PWRCTL_CLEAR);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302528 retry--;
2529 udelay(10);
2530 }
2531 if (likely(retry < 10))
2532 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2533 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302534
2535	 /* Handle BUS ON/OFF */
2536 if (irq_status & CORE_PWRCTL_BUS_ON) {
2537 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302538 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302539 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302540 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2541 VDD_IO_HIGH, 0);
2542 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302543 if (ret)
2544 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2545 else
2546 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302547
2548 pwr_state = REQ_BUS_ON;
2549 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302550 }
2551 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2552 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302553 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302554 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302555 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2556 VDD_IO_LOW, 0);
2557 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302558 if (ret)
2559 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2560 else
2561 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302562
2563 pwr_state = REQ_BUS_OFF;
2564 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302565 }
2566 /* Handle IO LOW/HIGH */
2567 if (irq_status & CORE_PWRCTL_IO_LOW) {
2568 /* Switch voltage Low */
2569 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2570 if (ret)
2571 irq_ack |= CORE_PWRCTL_IO_FAIL;
2572 else
2573 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302574
2575 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302576 }
2577 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2578 /* Switch voltage High */
2579 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2580 if (ret)
2581 irq_ack |= CORE_PWRCTL_IO_FAIL;
2582 else
2583 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302584
2585 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302586 }
2587
2588 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302589 sdhci_msm_writeb_relaxed(irq_ack, host,
2590 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302591 /*
2592 * SDHC has core_mem and hc_mem device memory and these memory
2593 * addresses do not fall within 1KB region. Hence, any update to
2594 * core_mem address space would require an mb() to ensure this gets
2595 * completed before its next update to registers within hc_mem.
2596 */
2597 mb();
2598
Krishna Konda46fd1432014-10-30 21:13:27 -07002599 if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302600 writel_relaxed((readl_relaxed(host->ioaddr +
2601 msm_host_offset->CORE_VENDOR_SPEC) &
2602 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2603 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002604 else if ((io_level & REQ_IO_LOW) ||
2605 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302606 writel_relaxed((readl_relaxed(host->ioaddr +
2607 msm_host_offset->CORE_VENDOR_SPEC) |
2608 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2609 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002610 mb();
2611
Asutosh Das0ef24812012-12-18 16:14:02 +05302612 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2613 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302614 spin_lock_irqsave(&host->lock, flags);
2615 if (pwr_state)
2616 msm_host->curr_pwr_state = pwr_state;
2617 if (io_level)
2618 msm_host->curr_io_level = io_level;
2619 complete(&msm_host->pwr_irq_completion);
2620 spin_unlock_irqrestore(&host->lock, flags);
2621
Asutosh Das0ef24812012-12-18 16:14:02 +05302622 return IRQ_HANDLED;
2623}
2624
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302625static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302626show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2627{
2628 struct sdhci_host *host = dev_get_drvdata(dev);
2629 int poll;
2630 unsigned long flags;
2631
2632 spin_lock_irqsave(&host->lock, flags);
2633 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2634 spin_unlock_irqrestore(&host->lock, flags);
2635
2636 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2637}
2638
2639static ssize_t
2640store_polling(struct device *dev, struct device_attribute *attr,
2641 const char *buf, size_t count)
2642{
2643 struct sdhci_host *host = dev_get_drvdata(dev);
2644 int value;
2645 unsigned long flags;
2646
2647 if (!kstrtou32(buf, 0, &value)) {
2648 spin_lock_irqsave(&host->lock, flags);
2649 if (value) {
2650 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2651 mmc_detect_change(host->mmc, 0);
2652 } else {
2653 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2654 }
2655 spin_unlock_irqrestore(&host->lock, flags);
2656 }
2657 return count;
2658}
2659
2660static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302661show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2662 char *buf)
2663{
2664 struct sdhci_host *host = dev_get_drvdata(dev);
2665 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2666 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2667
2668 return snprintf(buf, PAGE_SIZE, "%u\n",
2669 msm_host->msm_bus_vote.is_max_bw_needed);
2670}
2671
2672static ssize_t
2673store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2674 const char *buf, size_t count)
2675{
2676 struct sdhci_host *host = dev_get_drvdata(dev);
2677 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2678 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2679 uint32_t value;
2680 unsigned long flags;
2681
2682 if (!kstrtou32(buf, 0, &value)) {
2683 spin_lock_irqsave(&host->lock, flags);
2684 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2685 spin_unlock_irqrestore(&host->lock, flags);
2686 }
2687 return count;
2688}
2689
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302690static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302691{
2692 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2693 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302694 const struct sdhci_msm_offset *msm_host_offset =
2695 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302696 unsigned long flags;
2697 bool done = false;
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302698 u32 io_sig_sts;
Asutosh Das0ef24812012-12-18 16:14:02 +05302699
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302700 spin_lock_irqsave(&host->lock, flags);
2701 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2702 mmc_hostname(host->mmc), __func__, req_type,
2703 msm_host->curr_pwr_state, msm_host->curr_io_level);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302704 io_sig_sts = sdhci_msm_readl_relaxed(host,
2705 msm_host_offset->CORE_GENERICS);
2706
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302707 /*
2708 * The IRQ for request type IO High/Low will be generated when -
2709 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2710 * 2. If 1 is true and when there is a state change in 1.8V enable
2711 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2712 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2713 * layer tries to set it to 3.3V before card detection happens, the
2714 * IRQ doesn't get triggered as there is no state change in this bit.
2715 * The driver already handles this case by changing the IO voltage
2716 * level to high as part of controller power up sequence. Hence, check
2717 * for host->pwr to handle a case where IO voltage high request is
2718 * issued even before controller power up.
2719 */
2720 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2721 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2722 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2723 pr_debug("%s: do not wait for power IRQ that never comes\n",
2724 mmc_hostname(host->mmc));
2725 spin_unlock_irqrestore(&host->lock, flags);
2726 return;
2727 }
2728 }
2729
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302730 if ((req_type & msm_host->curr_pwr_state) ||
2731 (req_type & msm_host->curr_io_level))
2732 done = true;
2733 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302734
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302735 /*
2736	 * This is needed here to handle a case where the IRQ gets
2737 * triggered even before this function is called so that
2738 * x->done counter of completion gets reset. Otherwise,
2739 * next call to wait_for_completion returns immediately
2740 * without actually waiting for the IRQ to be handled.
2741 */
2742 if (done)
2743 init_completion(&msm_host->pwr_irq_completion);
2744 else
2745 wait_for_completion(&msm_host->pwr_irq_completion);
2746
2747 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2748 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302749}
2750
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002751static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2752{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302753 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2754 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2755 const struct sdhci_msm_offset *msm_host_offset =
2756 msm_host->offset;
2757 u32 config = readl_relaxed(host->ioaddr +
2758 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302759
2760 if (enable) {
2761 config |= CORE_CDR_EN;
2762 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302763 writel_relaxed(config, host->ioaddr +
2764 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302765 } else {
2766 config &= ~CORE_CDR_EN;
2767 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302768 writel_relaxed(config, host->ioaddr +
2769 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302770 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002771}
2772
Asutosh Das648f9d12013-01-10 21:11:04 +05302773static unsigned int sdhci_msm_max_segs(void)
2774{
2775 return SDHCI_MSM_MAX_SEGMENTS;
2776}
2777
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302778static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302779{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302780 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2781 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302782
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302783 return msm_host->pdata->sup_clk_table[0];
2784}
2785
2786static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2787{
2788 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2789 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2790 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2791
2792 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2793}
2794
2795static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2796 u32 req_clk)
2797{
2798 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2799 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2800 unsigned int sel_clk = -1;
2801 unsigned char cnt;
2802
2803 if (req_clk < sdhci_msm_get_min_clock(host)) {
2804 sel_clk = sdhci_msm_get_min_clock(host);
2805 return sel_clk;
2806 }
2807
2808 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2809 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2810 break;
2811 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2812 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2813 break;
2814 } else {
2815 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2816 }
2817 }
2818 return sel_clk;
2819}
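
/*
 * Worked example for sdhci_msm_get_sup_clk_rate() above (the table is
 * hypothetical): with sup_clk_table = {400000, 25000000, 50000000,
 * 100000000, 192000000}, a request of 52000000 returns 50000000 (the
 * largest supported rate not exceeding the request), while a request of
 * 300000 returns the minimum supported rate, 400000.
 */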
2820
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302821static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2822{
2823 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2824 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2825 int rc = 0;
2826
2827 if (atomic_read(&msm_host->controller_clock))
2828 return 0;
2829
2830 sdhci_msm_bus_voting(host, 1);
2831
2832 if (!IS_ERR(msm_host->pclk)) {
2833 rc = clk_prepare_enable(msm_host->pclk);
2834 if (rc) {
2835 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2836 mmc_hostname(host->mmc), __func__, rc);
2837 goto remove_vote;
2838 }
2839 }
2840
2841 rc = clk_prepare_enable(msm_host->clk);
2842 if (rc) {
2843 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2844 mmc_hostname(host->mmc), __func__, rc);
2845 goto disable_pclk;
2846 }
2847
2848 atomic_set(&msm_host->controller_clock, 1);
2849 pr_debug("%s: %s: enabled controller clock\n",
2850 mmc_hostname(host->mmc), __func__);
2851 goto out;
2852
2853disable_pclk:
2854 if (!IS_ERR(msm_host->pclk))
2855 clk_disable_unprepare(msm_host->pclk);
2856remove_vote:
2857 if (msm_host->msm_bus_vote.client_handle)
2858 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2859out:
2860 return rc;
2861}
2862
2863
2864
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302865static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2866{
2867 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2868 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2869 int rc = 0;
2870
2871 if (enable && !atomic_read(&msm_host->clks_on)) {
2872 pr_debug("%s: request to enable clocks\n",
2873 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302874
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302875 /*
2876 * The bus-width or the clock rate might have changed
2877		 * after controller clocks are enabled; update the bus vote
2878		 * in that case.
2879 */
2880 if (atomic_read(&msm_host->controller_clock))
2881 sdhci_msm_bus_voting(host, 1);
2882
2883 rc = sdhci_msm_enable_controller_clock(host);
2884 if (rc)
2885 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302886
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302887 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2888 rc = clk_prepare_enable(msm_host->bus_clk);
2889 if (rc) {
2890 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2891 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302892 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302893 }
2894 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002895 if (!IS_ERR(msm_host->ff_clk)) {
2896 rc = clk_prepare_enable(msm_host->ff_clk);
2897 if (rc) {
2898 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2899 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302900 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002901 }
2902 }
2903 if (!IS_ERR(msm_host->sleep_clk)) {
2904 rc = clk_prepare_enable(msm_host->sleep_clk);
2905 if (rc) {
2906 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2907 mmc_hostname(host->mmc), __func__, rc);
2908 goto disable_ff_clk;
2909 }
2910 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302911 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302912
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302913 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302914 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2915 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05302916 /*
2917 * During 1.8V signal switching the clock source must
2918 * still be ON as it requires accessing SDHC
2919		 * registers (SDHCI host control2 register bit 3 must
2920 * be written and polled after stopping the SDCLK).
2921 */
2922 if (host->mmc->card_clock_off)
2923 return 0;
2924 pr_debug("%s: request to disable clocks\n",
2925 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002926 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2927 clk_disable_unprepare(msm_host->sleep_clk);
2928 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2929 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302930 clk_disable_unprepare(msm_host->clk);
2931 if (!IS_ERR(msm_host->pclk))
2932 clk_disable_unprepare(msm_host->pclk);
2933 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2934 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302935
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302936 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302937 sdhci_msm_bus_voting(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302938 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302939 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302940 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002941disable_ff_clk:
2942 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2943 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302944disable_bus_clk:
2945 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2946 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302947disable_controller_clk:
2948 if (!IS_ERR_OR_NULL(msm_host->clk))
2949 clk_disable_unprepare(msm_host->clk);
2950 if (!IS_ERR_OR_NULL(msm_host->pclk))
2951 clk_disable_unprepare(msm_host->pclk);
2952 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302953remove_vote:
2954 if (msm_host->msm_bus_vote.client_handle)
2955 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302956out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302957 return rc;
2958}
2959
2960static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
2961{
2962 int rc;
2963 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2964 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302965 const struct sdhci_msm_offset *msm_host_offset =
2966 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07002967 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302968 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07002969 u32 sup_clock, ddr_clock, dll_lock;
Sahitya Tummala043744a2013-06-24 09:55:33 +05302970 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302971
2972 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05302973 /*
2974 * disable pwrsave to ensure clock is not auto-gated until
2975 * the rate is >400KHz (initialization complete).
2976 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302977 writel_relaxed(readl_relaxed(host->ioaddr +
2978 msm_host_offset->CORE_VENDOR_SPEC) &
2979 ~CORE_CLK_PWRSAVE, host->ioaddr +
2980 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302981 sdhci_msm_prepare_clocks(host, false);
2982 host->clock = clock;
2983 goto out;
2984 }
2985
2986 rc = sdhci_msm_prepare_clocks(host, true);
2987 if (rc)
2988 goto out;
2989
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302990 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
2991 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05302992 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07002993 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302994 writel_relaxed(readl_relaxed(host->ioaddr +
2995 msm_host_offset->CORE_VENDOR_SPEC)
2996 | CORE_CLK_PWRSAVE, host->ioaddr +
2997 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05302998 /*
2999	 * Disable pwrsave for a newly added card if it doesn't allow clock
3000 * gating.
3001 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003002 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303003 writel_relaxed(readl_relaxed(host->ioaddr +
3004 msm_host_offset->CORE_VENDOR_SPEC)
3005 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3006 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303007
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303008 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003009 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003010 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003011 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303012 /*
3013 * The SDHC requires internal clock frequency to be double the
3014 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003015		 * uses the faster clock (100/400MHz) for some of its parts and
3016		 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303017 */
3018 ddr_clock = clock * 2;
3019 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3020 ddr_clock);
3021 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003022
3023 /*
3024 * In general all timing modes are controlled via UHS mode select in
3025	 * Host Control2 register. eMMC specific HS200/HS400 don't have
3026 * their respective modes defined here, hence we use these values.
3027 *
3028 * HS200 - SDR104 (Since they both are equivalent in functionality)
3029 * HS400 - This involves multiple configurations
3030 * Initially SDR104 - when tuning is required as HS200
3031 * Then when switching to DDR @ 400MHz (HS400) we use
3032 * the vendor specific HC_SELECT_IN to control the mode.
3033 *
3034 * In addition to controlling the modes we also need to select the
3035 * correct input clock for DLL depending on the mode.
3036 *
3037 * HS400 - divided clock (free running MCLK/2)
3038 * All other modes - default (free running MCLK)
3039 */
3040 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3041 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303042 writel_relaxed(((readl_relaxed(host->ioaddr +
3043 msm_host_offset->CORE_VENDOR_SPEC)
3044 & ~CORE_HC_MCLK_SEL_MASK)
3045 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3046 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003047 /*
3048 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3049 * register
3050 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303051 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003052 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303053 msm_host->enhanced_strobe)) &&
3054 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003055 /*
3056 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3057 * field in VENDOR_SPEC_FUNC
3058 */
3059 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303060 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003061 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303062 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3063 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003064 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003065 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3066 /*
3067 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3068 * CORE_DLL_STATUS to be set. This should get set
3069			 * within 15 us at 200 MHz.
3070 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303071 rc = readl_poll_timeout(host->ioaddr +
3072 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003073 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3074 CORE_DDR_DLL_LOCK)), 10, 1000);
3075 if (rc == -ETIMEDOUT)
3076 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3077 mmc_hostname(host->mmc),
3078 dll_lock);
3079 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003080 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003081 if (!msm_host->use_cdclp533)
3082 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3083 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303084 msm_host_offset->CORE_VENDOR_SPEC3)
3085 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3086 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003087
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003088 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303089 writel_relaxed(((readl_relaxed(host->ioaddr +
3090 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003091 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303092 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3093 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003094
3095 /*
3096 * Disable HC_SELECT_IN to be able to use the UHS mode select
3097 * configuration from Host Control2 register for all other
3098 * modes.
3099 *
3100 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3101 * in VENDOR_SPEC_FUNC
3102 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303103 writel_relaxed((readl_relaxed(host->ioaddr +
3104 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003105 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303106 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3107 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003108 }
3109 mb();
3110
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303111 if (sup_clock != msm_host->clk_rate) {
3112 pr_debug("%s: %s: setting clk rate to %u\n",
3113 mmc_hostname(host->mmc), __func__, sup_clock);
3114 rc = clk_set_rate(msm_host->clk, sup_clock);
3115 if (rc) {
3116 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3117 mmc_hostname(host->mmc), __func__,
3118 sup_clock, rc);
3119 goto out;
3120 }
3121 msm_host->clk_rate = sup_clock;
3122 host->clock = clock;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303123 /*
3124 * Update the bus vote in case of frequency change due to
3125 * clock scaling.
3126 */
3127 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303128 }
3129out:
3130 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303131}
3132
Sahitya Tummala14613432013-03-21 11:13:25 +05303133static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3134 unsigned int uhs)
3135{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003136 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3137 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303138 const struct sdhci_msm_offset *msm_host_offset =
3139 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303140 u16 ctrl_2;
3141
3142 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3143 /* Select Bus Speed Mode for host */
3144 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003145 if ((uhs == MMC_TIMING_MMC_HS400) ||
3146 (uhs == MMC_TIMING_MMC_HS200) ||
3147 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303148 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3149 else if (uhs == MMC_TIMING_UHS_SDR12)
3150 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3151 else if (uhs == MMC_TIMING_UHS_SDR25)
3152 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3153 else if (uhs == MMC_TIMING_UHS_SDR50)
3154 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003155 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3156 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303157 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303158 /*
3159	 * When clock frequency is less than 100MHz, the feedback clock must be
3160 * provided and DLL must not be used so that tuning can be skipped. To
3161 * provide feedback clock, the mode selection can be any value less
3162 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3163 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003164 if (host->clock <= CORE_FREQ_100MHZ) {
3165 if ((uhs == MMC_TIMING_MMC_HS400) ||
3166 (uhs == MMC_TIMING_MMC_HS200) ||
3167 (uhs == MMC_TIMING_UHS_SDR104))
3168 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303169
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003170 /*
3171 * Make sure DLL is disabled when not required
3172 *
3173 * Write 1 to DLL_RST bit of DLL_CONFIG register
3174 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303175 writel_relaxed((readl_relaxed(host->ioaddr +
3176 msm_host_offset->CORE_DLL_CONFIG)
3177 | CORE_DLL_RST), host->ioaddr +
3178 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003179
3180 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303181 writel_relaxed((readl_relaxed(host->ioaddr +
3182 msm_host_offset->CORE_DLL_CONFIG)
3183 | CORE_DLL_PDN), host->ioaddr +
3184 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003185 mb();
3186
3187 /*
3188 * The DLL needs to be restored and CDCLP533 recalibrated
3189 * when the clock frequency is set back to 400MHz.
3190 */
3191 msm_host->calibration_done = false;
3192 }
3193
3194 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3195 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303196 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3197
3198}
3199
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003200#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003201#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303202static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003203{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303204 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303205 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3206 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303207 const struct sdhci_msm_offset *msm_host_offset =
3208 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303209 struct cmdq_host *cq_host = host->cq_host;
3210
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303211 u32 version = sdhci_msm_readl_relaxed(host,
3212 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003213 u16 minor = version & CORE_VERSION_TARGET_MASK;
3214 /* registers offset changed starting from 4.2.0 */
3215 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3216
3217 pr_err("---- Debug RAM dump ----\n");
3218 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3219 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3220 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3221
3222 while (i < 16) {
3223 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3224 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3225 i++;
3226 }
3227 pr_err("-------------------------\n");
3228}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303229
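/*
 * Dump the vendor-specific registers and walk every test-bus selection;
 * wired up as the .dump_vendor_regs host op for use on error paths.
 */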
3230void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3231{
3232 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3233 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303234 const struct sdhci_msm_offset *msm_host_offset =
3235 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303236 int tbsel, tbsel2;
3237 int i, index = 0;
3238 u32 test_bus_val = 0;
3239 u32 debug_reg[MAX_TEST_BUS] = {0};
3240
3241 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003242 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303243 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003244
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303245 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3246 sdhci_msm_readl_relaxed(host,
3247 msm_host_offset->CORE_MCI_DATA_CNT),
3248 sdhci_msm_readl_relaxed(host,
3249 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303250 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303251 sdhci_msm_readl_relaxed(host,
3252 msm_host_offset->CORE_MCI_DATA_CNT),
3253 sdhci_msm_readl_relaxed(host,
3254 msm_host_offset->CORE_MCI_FIFO_CNT),
3255 sdhci_msm_readl_relaxed(host,
3256 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303257 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303258 readl_relaxed(host->ioaddr +
3259 msm_host_offset->CORE_DLL_CONFIG),
3260 readl_relaxed(host->ioaddr +
3261 msm_host_offset->CORE_DLL_STATUS),
3262 sdhci_msm_readl_relaxed(host,
3263 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303264 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303265 readl_relaxed(host->ioaddr +
3266 msm_host_offset->CORE_VENDOR_SPEC),
3267 readl_relaxed(host->ioaddr +
3268 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3269 readl_relaxed(host->ioaddr +
3270 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303271 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303272 readl_relaxed(host->ioaddr +
3273 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303274
3275 /*
3276 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3277 * of CORE_TESTBUS_CONFIG register.
3278 *
3279 * To select test bus 0 to 7 use tbsel and to select any test bus
3280 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
3281	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3282 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3283 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003284 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303285 for (tbsel = 0; tbsel < 8; tbsel++) {
3286 if (index >= MAX_TEST_BUS)
3287 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303288 test_bus_val =
3289 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3290 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3291 sdhci_msm_writel_relaxed(test_bus_val, host,
3292 msm_host_offset->CORE_TESTBUS_CONFIG);
3293 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3294 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303295 }
3296 }
3297 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3298 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3299 i, i + 3, debug_reg[i], debug_reg[i+1],
3300 debug_reg[i+2], debug_reg[i+3]);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003301}
3302
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303303/*
3304 * sdhci_msm_enhanced_strobe_mask :-
3305 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3306 * SW should write 3 to
3307 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3308 * The default reset value of this register is 2.
3309 */
3310static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3311{
3312 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3313 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303314 const struct sdhci_msm_offset *msm_host_offset =
3315 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303316
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303317 if (!msm_host->enhanced_strobe ||
3318 !mmc_card_strobe(msm_host->mmc->card)) {
3319 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303320 mmc_hostname(host->mmc));
3321 return;
3322 }
3323
3324 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303325 writel_relaxed((readl_relaxed(host->ioaddr +
3326 msm_host_offset->CORE_VENDOR_SPEC3)
3327 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3328 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303329 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303330 writel_relaxed((readl_relaxed(host->ioaddr +
3331 msm_host_offset->CORE_VENDOR_SPEC3)
3332 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3333 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303334 }
3335}
3336
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003337static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3338{
3339 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3340 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303341 const struct sdhci_msm_offset *msm_host_offset =
3342 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003343
3344 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303345 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3346 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003347 } else {
3348 u32 value;
3349
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303350 value = sdhci_msm_readl_relaxed(host,
3351 msm_host_offset->CORE_TESTBUS_CONFIG);
3352 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3353 sdhci_msm_writel_relaxed(value, host,
3354 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003355 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303356}
3357
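/*
 * Reset workaround: on enable, request HC_SW_RST_REQ and poll for it to
 * clear; if it does not clear within roughly 100ms (10000 polls of 10us),
 * set HC_SW_RST_WAIT_IDLE_DIS so the reset does not wait on pending AXI
 * transfers. On disable, the wait-idle-disable bit is cleared again.
 */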
Pavan Anamula691dd592015-08-25 16:11:20 +05303358void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3359{
3360 u32 vendor_func2;
3361 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303362 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3363 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3364 const struct sdhci_msm_offset *msm_host_offset =
3365 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303366
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303367 vendor_func2 = readl_relaxed(host->ioaddr +
3368 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303369
3370 if (enable) {
3371 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303372 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303373 timeout = 10000;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303374 while (readl_relaxed(host->ioaddr +
3375 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303376 if (timeout == 0) {
3377 pr_info("%s: Applying wait idle disable workaround\n",
3378 mmc_hostname(host->mmc));
3379 /*
3380 * Apply the reset workaround to not wait for
3381 * pending data transfers on AXI before
3382 * resetting the controller. This could be
3383 * risky if the transfers were stuck on the
3384 * AXI bus.
3385 */
3386 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303387 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303388 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303389 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3390 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303391 host->reset_wa_t = ktime_get();
3392 return;
3393 }
3394 timeout--;
3395 udelay(10);
3396 }
3397 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3398 mmc_hostname(host->mmc));
3399 } else {
3400 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303401 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303402 }
3403}
3404
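/*
 * Deferred IRQ PM QoS unvote: if new votes arrived while the delayed work
 * was pending (counter is non-zero), keep the current request; otherwise
 * relax it back to PM_QOS_DEFAULT_VALUE.
 */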
Gilad Broner44445992015-09-29 16:05:39 +03003405static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3406{
3407 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303408 container_of(work, struct sdhci_msm_pm_qos_irq,
3409 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003410
3411 if (atomic_read(&pm_qos_irq->counter))
3412 return;
3413
3414 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3415 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3416}
3417
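/*
 * Take an IRQ PM QoS vote for an active request: every caller bumps the
 * counter, but the pm_qos request is only (re)issued for the first voter
 * or when the power-policy latency has changed.
 */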
3418void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3419{
3420 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3421 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3422 struct sdhci_msm_pm_qos_latency *latency =
3423 &msm_host->pdata->pm_qos_data.irq_latency;
3424 int counter;
3425
3426 if (!msm_host->pm_qos_irq.enabled)
3427 return;
3428
3429 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3430 /* Make sure to update the voting in case power policy has changed */
3431 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3432 && counter > 1)
3433 return;
3434
Asutosh Das36c2e922015-12-01 12:19:58 +05303435 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003436 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3437 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3438 msm_host->pm_qos_irq.latency);
3439}
3440
3441void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3442{
3443 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3444 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3445 int counter;
3446
3447 if (!msm_host->pm_qos_irq.enabled)
3448 return;
3449
Subhash Jadavani4d813902015-10-15 12:16:43 -07003450 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3451 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3452 } else {
3453 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3454 return;
Gilad Broner44445992015-09-29 16:05:39 +03003455 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003456
Gilad Broner44445992015-09-29 16:05:39 +03003457 if (counter)
3458 return;
3459
3460 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303461 schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
3462 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003463 return;
3464 }
3465
3466 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3467 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3468 msm_host->pm_qos_irq.latency);
3469}
3470
Gilad Broner68c54562015-09-20 11:59:46 +03003471static ssize_t
3472sdhci_msm_pm_qos_irq_show(struct device *dev,
3473 struct device_attribute *attr, char *buf)
3474{
3475 struct sdhci_host *host = dev_get_drvdata(dev);
3476 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3477 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3478 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3479
3480 return snprintf(buf, PAGE_SIZE,
3481 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3482 irq->enabled, atomic_read(&irq->counter), irq->latency);
3483}
3484
3485static ssize_t
3486sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3487 struct device_attribute *attr, char *buf)
3488{
3489 struct sdhci_host *host = dev_get_drvdata(dev);
3490 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3491 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3492
3493 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3494}
3495
3496static ssize_t
3497sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3498 struct device_attribute *attr, const char *buf, size_t count)
3499{
3500 struct sdhci_host *host = dev_get_drvdata(dev);
3501 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3502 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3503 uint32_t value;
3504 bool enable;
3505 int ret;
3506
3507 ret = kstrtou32(buf, 0, &value);
3508 if (ret)
3509 goto out;
3510 enable = !!value;
3511
3512 if (enable == msm_host->pm_qos_irq.enabled)
3513 goto out;
3514
3515 msm_host->pm_qos_irq.enabled = enable;
3516 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303517 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003518 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3519 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3520 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3521 msm_host->pm_qos_irq.latency);
3522 }
3523
3524out:
3525 return count;
3526}
3527
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003528#ifdef CONFIG_SMP
3529static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3530 struct sdhci_host *host)
3531{
3532 msm_host->pm_qos_irq.req.irq = host->irq;
3533}
3534#else
3535static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
3536 struct sdhci_host *host) { }
3537#endif
3538
Gilad Broner44445992015-09-29 16:05:39 +03003539void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
3540{
3541 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3542 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3543 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03003544 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003545
3546 if (!msm_host->pdata->pm_qos_data.irq_valid)
3547 return;
3548
3549 /* Initialize only once as this gets called per partition */
3550 if (msm_host->pm_qos_irq.enabled)
3551 return;
3552
3553 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3554 msm_host->pm_qos_irq.req.type =
3555 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07003556 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
3557 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
3558 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03003559 else
3560 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
3561 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
3562
Asutosh Das36c2e922015-12-01 12:19:58 +05303563 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003564 sdhci_msm_pm_qos_irq_unvote_work);
3565 /* For initialization phase, set the performance latency */
3566 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
3567 msm_host->pm_qos_irq.latency =
3568 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
3569 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
3570 msm_host->pm_qos_irq.latency);
3571 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003572
3573 /* sysfs */
3574 msm_host->pm_qos_irq.enable_attr.show =
3575 sdhci_msm_pm_qos_irq_enable_show;
3576 msm_host->pm_qos_irq.enable_attr.store =
3577 sdhci_msm_pm_qos_irq_enable_store;
3578 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
3579 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
3580 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
3581 ret = device_create_file(&msm_host->pdev->dev,
3582 &msm_host->pm_qos_irq.enable_attr);
3583 if (ret)
3584 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
3585 __func__, ret);
3586
3587 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
3588 msm_host->pm_qos_irq.status_attr.store = NULL;
3589 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
3590 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
3591 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
3592 ret = device_create_file(&msm_host->pdev->dev,
3593 &msm_host->pm_qos_irq.status_attr);
3594 if (ret)
3595 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
3596 __func__, ret);
3597}
3598
3599static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3600 struct device_attribute *attr, char *buf)
3601{
3602 struct sdhci_host *host = dev_get_drvdata(dev);
3603 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3604 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3605 struct sdhci_msm_pm_qos_group *group;
3606 int i;
3607 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3608 int offset = 0;
3609
3610 for (i = 0; i < nr_groups; i++) {
3611 group = &msm_host->pm_qos[i];
3612 offset += snprintf(&buf[offset], PAGE_SIZE,
3613 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3614 i, group->req.cpus_affine.bits[0],
3615 msm_host->pm_qos_group_enable,
3616 atomic_read(&group->counter),
3617 group->latency);
3618 }
3619
3620 return offset;
3621}
3622
3623static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3624 struct device_attribute *attr, char *buf)
3625{
3626 struct sdhci_host *host = dev_get_drvdata(dev);
3627 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3628 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3629
3630 return snprintf(buf, PAGE_SIZE, "%s\n",
3631 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3632}
3633
3634static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3635 struct device_attribute *attr, const char *buf, size_t count)
3636{
3637 struct sdhci_host *host = dev_get_drvdata(dev);
3638 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3639 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3640 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3641 uint32_t value;
3642 bool enable;
3643 int ret;
3644 int i;
3645
3646 ret = kstrtou32(buf, 0, &value);
3647 if (ret)
3648 goto out;
3649 enable = !!value;
3650
3651 if (enable == msm_host->pm_qos_group_enable)
3652 goto out;
3653
3654 msm_host->pm_qos_group_enable = enable;
3655 if (!enable) {
3656 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303657 cancel_delayed_work_sync(
3658 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03003659 atomic_set(&msm_host->pm_qos[i].counter, 0);
3660 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3661 pm_qos_update_request(&msm_host->pm_qos[i].req,
3662 msm_host->pm_qos[i].latency);
3663 }
3664 }
3665
3666out:
3667 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003668}
3669
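/*
 * Map a CPU number to its PM QoS group index using the DT-provided
 * cpu_group_map; returns -EINVAL if the CPU is negative or not covered
 * by any group.
 */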
3670static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3671{
3672 int i;
3673 struct sdhci_msm_cpu_group_map *map =
3674 &msm_host->pdata->pm_qos_data.cpu_group_map;
3675
3676 if (cpu < 0)
3677 goto not_found;
3678
3679 for (i = 0; i < map->nr_groups; i++)
3680 if (cpumask_test_cpu(cpu, &map->mask[i]))
3681 return i;
3682
3683not_found:
3684 return -EINVAL;
3685}
3686
3687void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
3688 struct sdhci_msm_pm_qos_latency *latency, int cpu)
3689{
3690 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3691 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3692 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3693 struct sdhci_msm_pm_qos_group *pm_qos_group;
3694 int counter;
3695
3696 if (!msm_host->pm_qos_group_enable || group < 0)
3697 return;
3698
3699 pm_qos_group = &msm_host->pm_qos[group];
3700 counter = atomic_inc_return(&pm_qos_group->counter);
3701
3702 /* Make sure to update the voting in case power policy has changed */
3703 if (pm_qos_group->latency == latency->latency[host->power_policy]
3704 && counter > 1)
3705 return;
3706
Asutosh Das36c2e922015-12-01 12:19:58 +05303707 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003708
3709 pm_qos_group->latency = latency->latency[host->power_policy];
3710 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
3711}
3712
3713static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3714{
3715 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05303716 container_of(work, struct sdhci_msm_pm_qos_group,
3717 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003718
3719 if (atomic_read(&group->counter))
3720 return;
3721
3722 group->latency = PM_QOS_DEFAULT_VALUE;
3723 pm_qos_update_request(&group->req, group->latency);
3724}
3725
Gilad Broner07d92eb2015-09-29 16:57:21 +03003726bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003727{
3728 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3729 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3730 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3731
3732 if (!msm_host->pm_qos_group_enable || group < 0 ||
3733 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003734 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003735
3736 if (async) {
Asutosh Das36c2e922015-12-01 12:19:58 +05303737 schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
3738 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03003739 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003740 }
3741
3742 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3743 pm_qos_update_request(&msm_host->pm_qos[group].req,
3744 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003745 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003746}
3747
3748void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3749 struct sdhci_msm_pm_qos_latency *latency)
3750{
3751 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3752 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3753 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3754 struct sdhci_msm_pm_qos_group *group;
3755 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003756 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003757
3758 if (msm_host->pm_qos_group_enable)
3759 return;
3760
3761 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3762 GFP_KERNEL);
3763 if (!msm_host->pm_qos)
3764 return;
3765
3766 for (i = 0; i < nr_groups; i++) {
3767 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05303768 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03003769 sdhci_msm_pm_qos_cpu_unvote_work);
3770 atomic_set(&group->counter, 0);
3771 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3772 cpumask_copy(&group->req.cpus_affine,
3773 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3774 /* For initialization phase, set the performance mode latency */
3775 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3776 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3777 group->latency);
3778 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3779 __func__, i,
3780 group->req.cpus_affine.bits[0],
3781 group->latency,
3782 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3783 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003784 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003785 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003786
3787 /* sysfs */
3788 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3789 msm_host->pm_qos_group_status_attr.store = NULL;
3790 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3791 msm_host->pm_qos_group_status_attr.attr.name =
3792 "pm_qos_cpu_groups_status";
3793 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3794 ret = device_create_file(&msm_host->pdev->dev,
3795 &msm_host->pm_qos_group_status_attr);
3796 if (ret)
3797 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3798 __func__, ret);
3799 msm_host->pm_qos_group_enable_attr.show =
3800 sdhci_msm_pm_qos_group_enable_show;
3801 msm_host->pm_qos_group_enable_attr.store =
3802 sdhci_msm_pm_qos_group_enable_store;
3803 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3804 msm_host->pm_qos_group_enable_attr.attr.name =
3805 "pm_qos_cpu_groups_enable";
3806 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3807 ret = device_create_file(&msm_host->pdev->dev,
3808 &msm_host->pm_qos_group_enable_attr);
3809 if (ret)
3810 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3811 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003812}
3813
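/*
 * Per-request PM QoS flow: pre_req() takes an IRQ latency vote and a vote
 * for the CPU group of the CPU issuing the request (dropping the previous
 * group's vote if the request migrated); post_req() releases both, and the
 * latency requests are relaxed once their counters drop to zero.
 */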
Gilad Broner07d92eb2015-09-29 16:57:21 +03003814static void sdhci_msm_pre_req(struct sdhci_host *host,
3815 struct mmc_request *mmc_req)
3816{
3817 int cpu;
3818 int group;
3819 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3820 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3821 int prev_group = sdhci_msm_get_cpu_group(msm_host,
3822 msm_host->pm_qos_prev_cpu);
3823
3824 sdhci_msm_pm_qos_irq_vote(host);
3825
3826 cpu = get_cpu();
3827 put_cpu();
3828 group = sdhci_msm_get_cpu_group(msm_host, cpu);
3829 if (group < 0)
3830 return;
3831
3832 if (group != prev_group && prev_group >= 0) {
3833 sdhci_msm_pm_qos_cpu_unvote(host,
3834 msm_host->pm_qos_prev_cpu, false);
3835 prev_group = -1; /* make sure to vote for new group */
3836 }
3837
3838 if (prev_group < 0) {
3839 sdhci_msm_pm_qos_cpu_vote(host,
3840 msm_host->pdata->pm_qos_data.latency, cpu);
3841 msm_host->pm_qos_prev_cpu = cpu;
3842 }
3843}
3844
3845static void sdhci_msm_post_req(struct sdhci_host *host,
3846 struct mmc_request *mmc_req)
3847{
3848 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3849 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3850
3851 sdhci_msm_pm_qos_irq_unvote(host, false);
3852
3853 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3854 msm_host->pm_qos_prev_cpu = -1;
3855}
3856
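/*
 * Host init hook: sets up the IRQ PM QoS request and, when the platform
 * data provides per-CPU-group latencies (legacy_valid), the CPU-group
 * requests as well.
 */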
3857static void sdhci_msm_init(struct sdhci_host *host)
3858{
3859 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3860 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3861
3862 sdhci_msm_pm_qos_irq_init(host);
3863
3864 if (msm_host->pdata->pm_qos_data.legacy_valid)
3865 sdhci_msm_pm_qos_cpu_init(host,
3866 msm_host->pdata->pm_qos_data.latency);
3867}
3868
Sahitya Tummala9150a942014-10-31 15:33:04 +05303869static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
3870{
3871 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3872 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3873 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
3874 u32 max_curr = 0;
3875
3876 if (curr_slot && curr_slot->vdd_data)
3877 max_curr = curr_slot->vdd_data->hpm_uA;
3878
3879 return max_curr;
3880}
3881
Asutosh Das0ef24812012-12-18 16:14:02 +05303882static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala14613432013-03-21 11:13:25 +05303883 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05303884 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003885 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05303886 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003887 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05303888 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303889 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303890 .get_min_clock = sdhci_msm_get_min_clock,
3891 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303892 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05303893 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303894 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08003895 .set_bus_width = sdhci_set_bus_width,
Venkat Gopalakrishnan411df072015-01-09 11:09:44 -08003896 .reset = sdhci_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003897 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303898 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05303899 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03003900 .init = sdhci_msm_init,
3901 .pre_req = sdhci_msm_pre_req,
3902 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05303903 .get_current_limit = sdhci_msm_get_current_limit,
Asutosh Das0ef24812012-12-18 16:14:02 +05303904};
3905
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303906static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
3907 struct sdhci_host *host)
3908{
Krishna Konda46fd1432014-10-30 21:13:27 -07003909 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303910 u16 minor;
3911 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303912 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303913 const struct sdhci_msm_offset *msm_host_offset =
3914 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303915
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303916 version = sdhci_msm_readl_relaxed(host,
3917 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303918 major = (version & CORE_VERSION_MAJOR_MASK) >>
3919 CORE_VERSION_MAJOR_SHIFT;
3920 minor = version & CORE_VERSION_TARGET_MASK;
3921
Krishna Konda46fd1432014-10-30 21:13:27 -07003922 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
3923
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303924 /*
3925 * Starting with SDCC 5 controller (core major version = 1)
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003926	 * the controller won't advertise 3.0V, 1.8V and 8-bit features
3927 * except for some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303928 */
3929 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003930 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003931 /*
3932 * Enable 1.8V support capability on controllers that
3933 * support dual voltage
3934 */
3935 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07003936 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
3937 caps |= CORE_3_0V_SUPPORT;
3938 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07003939 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05303940 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
3941 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303942 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003943
3944 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303945 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
3946	 * on 8992 (minor 0x3e) as a reset workaround for the data-stuck issue.
3947 */
3948 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303949 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303950 val = readl_relaxed(host->ioaddr +
3951 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303952 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303953 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303954 }
3955 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003956 * SDCC 5 controller with major version 1, minor version 0x34 and later
3957 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
3958 */
3959 if ((major == 1) && (minor < 0x34))
3960 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03003961
3962 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003963 * SDCC 5 controller with major version 1, minor version 0x42 and later
3964 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05303965 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003966 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05303967 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003968 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05303969 msm_host->enhanced_strobe = true;
3970 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08003971
3972 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03003973 * SDCC 5 controller with major version 1 and minor version 0x42,
3974	 * 0x46 and 0x49 currently uses the 14lpp tech DLL whose internal
3975	 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05303976	 * when MCLK is gated OFF, it is not gated for less than 0.5us,
3977	 * and MCLK must be switched on for at least 1us before DATA
3978 * starts coming.
3979 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03003980 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
3981 (minor == 0x49)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05303982 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07003983
Pavan Anamula5a256df2015-10-16 14:38:28 +05303984 /* Fake 3.0V support for SDIO devices which requires such voltage */
3985 if (msm_host->pdata->core_3_0v_support) {
3986 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303987 writel_relaxed((readl_relaxed(host->ioaddr +
3988 SDHCI_CAPABILITIES) | caps), host->ioaddr +
3989 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05303990 }
3991
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07003992 if ((major == 1) && (minor >= 0x49))
3993 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05303994 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03003995 * Mask 64-bit support for controller with 32-bit address bus so that
3996 * smaller descriptor size will be used and improve memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03003997 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08003998 if (!msm_host->pdata->largeaddressbus)
3999 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4000
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304001 writel_relaxed(caps, host->ioaddr +
4002 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004003 /* keep track of the value in SDHCI_CAPABILITIES */
4004 msm_host->caps_0 = caps;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304005}
4006
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004007#ifdef CONFIG_MMC_CQ_HCI
4008static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4009 struct platform_device *pdev)
4010{
4011 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4012 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4013
4014 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004015 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004016 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4017 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004018 host->cq_host = NULL;
4019 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004020 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004021 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004022}
4023#else
4024static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4025 struct platform_device *pdev)
4026{
4027
4028}
4029#endif
4030
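/*
 * Boot-device check used to skip probing non-boot eMMC slots: the device
 * is matched by looking for "androidboot.bootdevice=<dev_name>" in the
 * kernel command line.
 */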
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004031static bool sdhci_msm_is_bootdevice(struct device *dev)
4032{
4033 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4034 strlen(saved_command_line))) {
4035 char search_string[50];
4036
4037 snprintf(search_string, ARRAY_SIZE(search_string),
4038 "androidboot.bootdevice=%s", dev_name(dev));
4039 if (strnstr(saved_command_line, search_string,
4040 strlen(saved_command_line)))
4041 return true;
4042 else
4043 return false;
4044 }
4045
4046 /*
4047	 * If the "androidboot.bootdevice=" argument is not present, then
4048	 * return true as we don't know the boot device anyway.
4049 */
4050 return true;
4051}
4052
Asutosh Das0ef24812012-12-18 16:14:02 +05304053static int sdhci_msm_probe(struct platform_device *pdev)
4054{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304055 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304056 struct sdhci_host *host;
4057 struct sdhci_pltfm_host *pltfm_host;
4058 struct sdhci_msm_host *msm_host;
4059 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004060 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004061 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004062 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304063 struct resource *tlmm_memres = NULL;
4064 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304065 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05304066
4067 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4068 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4069 GFP_KERNEL);
4070 if (!msm_host) {
4071 ret = -ENOMEM;
4072 goto out;
4073 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304074
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304075 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4076 msm_host->mci_removed = true;
4077 msm_host->offset = &sdhci_msm_offset_mci_removed;
4078 } else {
4079 msm_host->mci_removed = false;
4080 msm_host->offset = &sdhci_msm_offset_mci_present;
4081 }
4082 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304083 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4084 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4085 if (IS_ERR(host)) {
4086 ret = PTR_ERR(host);
4087 goto out;
4088 }
4089
4090 pltfm_host = sdhci_priv(host);
4091 pltfm_host->priv = msm_host;
4092 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304093 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304094
4095 /* Extract platform data */
4096 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004097 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304098 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004099 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4100 ret);
4101 goto pltfm_free;
4102 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004103
4104 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004105 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
4106 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004107 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004108 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004109
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004110 if (disable_slots & (1 << (ret - 1))) {
4111 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4112 ret);
4113 ret = -ENODEV;
4114 goto pltfm_free;
4115 }
4116
Sayali Lokhande5f768322016-04-11 18:36:53 +05304117 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004118 sdhci_slot[ret-1] = msm_host;
4119
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004120 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4121 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304122 if (!msm_host->pdata) {
4123 dev_err(&pdev->dev, "DT parsing error\n");
4124 goto pltfm_free;
4125 }
4126 } else {
4127 dev_err(&pdev->dev, "No device tree node\n");
4128 goto pltfm_free;
4129 }
4130
4131 /* Setup Clocks */
4132
4133 /* Setup SDCC bus voter clock. */
4134 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4135 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4136 /* Vote for max. clk rate for max. performance */
4137 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4138 if (ret)
4139 goto pltfm_free;
4140 ret = clk_prepare_enable(msm_host->bus_clk);
4141 if (ret)
4142 goto pltfm_free;
4143 }
4144
4145 /* Setup main peripheral bus clock */
4146 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4147 if (!IS_ERR(msm_host->pclk)) {
4148 ret = clk_prepare_enable(msm_host->pclk);
4149 if (ret)
4150 goto bus_clk_disable;
4151 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304152 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304153
4154 /* Setup SDC MMC clock */
4155 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4156 if (IS_ERR(msm_host->clk)) {
4157 ret = PTR_ERR(msm_host->clk);
4158 goto pclk_disable;
4159 }
4160
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304161 /* Set to the minimum supported clock frequency */
4162 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4163 if (ret) {
4164 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304165 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304166 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304167 ret = clk_prepare_enable(msm_host->clk);
4168 if (ret)
4169 goto pclk_disable;
4170
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304171 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304172 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304173
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004174 /* Setup CDC calibration fixed feedback clock */
4175 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4176 if (!IS_ERR(msm_host->ff_clk)) {
4177 ret = clk_prepare_enable(msm_host->ff_clk);
4178 if (ret)
4179 goto clk_disable;
4180 }
4181
4182 /* Setup CDC calibration sleep clock */
4183 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4184 if (!IS_ERR(msm_host->sleep_clk)) {
4185 ret = clk_prepare_enable(msm_host->sleep_clk);
4186 if (ret)
4187 goto ff_clk_disable;
4188 }
4189
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004190 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4191
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304192 ret = sdhci_msm_bus_register(msm_host, pdev);
4193 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004194 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304195
4196 if (msm_host->msm_bus_vote.client_handle)
4197 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4198 sdhci_msm_bus_work);
4199 sdhci_msm_bus_voting(host, 1);
4200
Asutosh Das0ef24812012-12-18 16:14:02 +05304201 /* Setup regulators */
4202 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4203 if (ret) {
4204 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304205 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304206 }
4207
4208 /* Reset the core and Enable SDHC mode */
4209 core_memres = platform_get_resource_byname(pdev,
4210 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304211 if (!msm_host->mci_removed) {
4212 if (!core_memres) {
4213			dev_err(&pdev->dev, "Failed to get iomem resource\n");
			ret = -EINVAL;
4214			goto vreg_deinit;
4215 }
4216 msm_host->core_mem = devm_ioremap(&pdev->dev,
4217 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304218
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304219 if (!msm_host->core_mem) {
4220 dev_err(&pdev->dev, "Failed to remap registers\n");
4221 ret = -ENOMEM;
4222 goto vreg_deinit;
4223 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304224 }
4225
Sahitya Tummala079ed852015-10-29 20:18:45 +05304226 tlmm_memres = platform_get_resource_byname(pdev,
4227 IORESOURCE_MEM, "tlmm_mem");
4228 if (tlmm_memres) {
4229 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4230 resource_size(tlmm_memres));
4231
4232 if (!tlmm_mem) {
4233 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4234 ret = -ENOMEM;
4235 goto vreg_deinit;
4236 }
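		/* Set bit 1 in the mapped TLMM (pin controller) register; the exact pad function is SoC-specific */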
4237 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
4238 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
4239 &tlmm_memres->start, readl_relaxed(tlmm_mem));
4240 }
4241
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304242 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004243	 * Reset the vendor spec register to power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304244 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004245 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304246 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304247
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304248 if (!msm_host->mci_removed) {
4249 /* Set HC_MODE_EN bit in HC_MODE register */
4250 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304251
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304252 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4253 writel_relaxed(readl_relaxed(msm_host->core_mem +
4254 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4255 msm_host->core_mem + CORE_HC_MODE);
4256 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304257 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004258
4259 /*
4260	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4261 * be used as required later on.
4262 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304263 writel_relaxed((readl_relaxed(host->ioaddr +
4264 msm_host_offset->CORE_VENDOR_SPEC) |
4265 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4266 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304267 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304268 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4269 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4270 * interrupt in GIC (by registering the interrupt handler), we need to
4271	 * ensure that any pending power irq interrupt status is acknowledged;
4272	 * otherwise the power irq interrupt handler would fire prematurely.
4273 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304274 irq_status = sdhci_msm_readl_relaxed(host,
4275 msm_host_offset->CORE_PWRCTL_STATUS);
4276 sdhci_msm_writel_relaxed(irq_status, host,
4277 msm_host_offset->CORE_PWRCTL_CLEAR);
4278 irq_ctl = sdhci_msm_readl_relaxed(host,
4279 msm_host_offset->CORE_PWRCTL_CTL);
4280
Subhash Jadavani28137342013-05-14 17:46:43 +05304281 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4282 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4283 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4284 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304285 sdhci_msm_writel_relaxed(irq_ctl, host,
4286 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004287
Subhash Jadavani28137342013-05-14 17:46:43 +05304288 /*
4289	 * Ensure that the above writes are propagated before interrupt enablement
4290 * in GIC.
4291 */
4292 mb();
4293
4294 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304295 * Following are the deviations from SDHC spec v3.0 -
4296 * 1. Card detection is handled using separate GPIO.
4297 * 2. Bus power control is handled by interacting with PMIC.
4298 */
4299 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4300 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304301 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004302 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304303 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304304 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304305 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304306 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304307 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304308 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304309
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304310 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4311 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4312
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004313 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004314 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4315 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4316 SDHCI_VENDOR_VER_SHIFT));
4317 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4318 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4319 /*
4320		 * Add a 40us delay in the interrupt handler when
4321		 * operating at the initialization frequency (400KHz).
4322 */
4323 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4324 /*
4325 * Set Software Reset for DAT line in Software
4326 * Reset Register (Bit 2).
4327 */
4328 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4329 }
4330
Asutosh Das214b9662013-06-13 14:27:42 +05304331 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4332
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004333 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004334 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4335 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304336 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004337 msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
Asutosh Das0ef24812012-12-18 16:14:02 +05304338		goto vreg_deinit;
4339 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004340 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304341 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004342 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304343 if (ret) {
4344 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004345 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304346 goto vreg_deinit;
4347 }
4348
4349 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304350 sdhci_msm_writel_relaxed(INT_MASK, host,
4351 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304352
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304353#ifdef CONFIG_MMC_CLKGATE
4354 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4355 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4356#endif
4357
Asutosh Das0ef24812012-12-18 16:14:02 +05304358 /* Set host capabilities */
4359 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4360 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004361 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304362 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304363 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004364 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004365 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004366 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304367 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004368 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004369 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304370 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05304371
4372 if (msm_host->pdata->nonremovable)
4373 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4374
Guoping Yuf7c91332014-08-20 16:56:18 +08004375 if (msm_host->pdata->nonhotplug)
4376 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4377
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304378 init_completion(&msm_host->pwr_irq_completion);
4379
Sahitya Tummala581df132013-03-12 14:57:46 +05304380 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304381 /*
4382 * Set up the card detect GPIO in active configuration before
4383 * configuring it as an IRQ. Otherwise, it can be in some
4384		 * weird/inconsistent state resulting in a flood of interrupts.
4385 */
4386 sdhci_msm_setup_pins(msm_host->pdata, true);
4387
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304388 /*
4389 * This delay is needed for stabilizing the card detect GPIO
4390 * line after changing the pull configs.
4391 */
4392 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304393 ret = mmc_gpio_request_cd(msm_host->mmc,
4394 msm_host->pdata->status_gpio, 0);
4395 if (ret) {
4396 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4397 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304398 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304399 }
4400 }
4401
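	/*
	 * Prefer a 64-bit DMA mask when the controller capabilities advertise
	 * 64-bit system bus support; otherwise fall back to a 32-bit mask.
	 */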
Krishna Konda7feab352013-09-17 23:55:40 -07004402 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4403 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4404 host->dma_mask = DMA_BIT_MASK(64);
4405 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304406 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004407 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304408 host->dma_mask = DMA_BIT_MASK(32);
4409 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304410 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304411 } else {
4412 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4413 }
4414
Ritesh Harjani42876f42015-11-17 17:46:51 +05304415 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
4416 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05304417 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304418 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
4419 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05304420 msm_host->is_sdiowakeup_enabled = true;
4421 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
4422 sdhci_msm_sdiowakeup_irq,
4423 IRQF_SHARED | IRQF_TRIGGER_HIGH,
4424 "sdhci-msm sdiowakeup", host);
4425 if (ret) {
4426 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
4427 __func__, msm_host->pdata->sdiowakeup_irq, ret);
4428 msm_host->pdata->sdiowakeup_irq = -1;
4429 msm_host->is_sdiowakeup_enabled = false;
4430 goto vreg_deinit;
4431 } else {
4432 spin_lock_irqsave(&host->lock, flags);
4433 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304434 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304435 spin_unlock_irqrestore(&host->lock, flags);
4436 }
4437 }
4438
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004439 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304440 ret = sdhci_add_host(host);
4441 if (ret) {
4442 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304443 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304444 }
4445
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004446 pm_runtime_set_active(&pdev->dev);
4447 pm_runtime_enable(&pdev->dev);
4448 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4449 pm_runtime_use_autosuspend(&pdev->dev);
4450
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304451 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4452 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4453 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4454 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4455 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4456 ret = device_create_file(&pdev->dev,
4457 &msm_host->msm_bus_vote.max_bus_bw);
4458 if (ret)
4459 goto remove_host;
4460
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304461 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4462 msm_host->polling.show = show_polling;
4463 msm_host->polling.store = store_polling;
4464 sysfs_attr_init(&msm_host->polling.attr);
4465 msm_host->polling.attr.name = "polling";
4466 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4467 ret = device_create_file(&pdev->dev, &msm_host->polling);
4468 if (ret)
4469 goto remove_max_bus_bw_file;
4470 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304471
4472 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4473 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4474 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4475 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4476 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4477 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4478 if (ret) {
4479 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4480 mmc_hostname(host->mmc), __func__, ret);
4481 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4482 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304483 /* Successful initialization */
4484 goto out;
4485
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304486remove_max_bus_bw_file:
4487 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304488remove_host:
4489 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004490 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304491 sdhci_remove_host(host, dead);
4492vreg_deinit:
4493 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304494bus_unregister:
4495 if (msm_host->msm_bus_vote.client_handle)
4496 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4497 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004498sleep_clk_disable:
4499 if (!IS_ERR(msm_host->sleep_clk))
4500 clk_disable_unprepare(msm_host->sleep_clk);
4501ff_clk_disable:
4502 if (!IS_ERR(msm_host->ff_clk))
4503 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304504clk_disable:
4505 if (!IS_ERR(msm_host->clk))
4506 clk_disable_unprepare(msm_host->clk);
4507pclk_disable:
4508 if (!IS_ERR(msm_host->pclk))
4509 clk_disable_unprepare(msm_host->pclk);
4510bus_clk_disable:
4511 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4512 clk_disable_unprepare(msm_host->bus_clk);
4513pltfm_free:
4514 sdhci_pltfm_free(pdev);
4515out:
4516 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4517 return ret;
4518}
4519
4520static int sdhci_msm_remove(struct platform_device *pdev)
4521{
4522 struct sdhci_host *host = platform_get_drvdata(pdev);
4523 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4524 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4525 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4526 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4527 0xffffffff);
4528
4529 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304530 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4531 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304532 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004533 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304534 sdhci_remove_host(host, dead);
4535 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304536
Asutosh Das0ef24812012-12-18 16:14:02 +05304537 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304538
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304539 sdhci_msm_setup_pins(pdata, true);
4540 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304541
4542 if (msm_host->msm_bus_vote.client_handle) {
4543 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4544 sdhci_msm_bus_unregister(msm_host);
4545 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304546 return 0;
4547}
4548
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004549#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05304550static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
4551{
4552 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4553 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4554 unsigned long flags;
4555 int ret = 0;
4556
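	/*
	 * Bail out (returning a non-zero value) when SDIO wakeup via the DAT1
	 * GPIO is not applicable; callers then fall back to plain host irq
	 * enable/disable.
	 */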
4557 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
4558 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
4559 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05304560 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304561 return 1;
4562 }
4563
4564 spin_lock_irqsave(&host->lock, flags);
4565 if (enable) {
4566 /* configure DAT1 gpio if applicable */
4567 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304568 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304569 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4570 if (!ret)
4571 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
4572 goto out;
4573 } else {
4574 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4575 mmc_hostname(host->mmc), enable);
4576 }
4577 } else {
4578 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
4579 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
4580 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304581 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304582 } else {
4583			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
4584 mmc_hostname(host->mmc), enable);
4585
4586 }
4587 }
4588out:
4589 if (ret)
4590 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
4591 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
4592 ret, msm_host->pdata->sdiowakeup_irq);
4593 spin_unlock_irqrestore(&host->lock, flags);
4594 return ret;
4595}
4596
4597
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004598static int sdhci_msm_runtime_suspend(struct device *dev)
4599{
4600 struct sdhci_host *host = dev_get_drvdata(dev);
4601 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4602 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004603 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004604
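	/*
	 * For SDIO cards, leave the host irq enabled across runtime suspend
	 * (SDIO irqs may still need servicing); only the pwr_irq is disabled
	 * below.
	 */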
Ritesh Harjani42876f42015-11-17 17:46:51 +05304605 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4606 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304607
Ritesh Harjani42876f42015-11-17 17:46:51 +05304608 sdhci_cfg_irq(host, false, true);
4609
4610defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004611 disable_irq(msm_host->pwr_irq);
4612
4613 /*
4614	 * Remove the bus vote immediately only if the clocks are off; in that
4615	 * case we might have queued work to remove the vote, but it may not
4616	 * complete before runtime suspend or system suspend.
4617 */
4618 if (!atomic_read(&msm_host->clks_on)) {
4619 if (msm_host->msm_bus_vote.client_handle)
4620 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4621 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004622 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4623 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004624
4625 return 0;
4626}
4627
4628static int sdhci_msm_runtime_resume(struct device *dev)
4629{
4630 struct sdhci_host *host = dev_get_drvdata(dev);
4631 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4632 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004633 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004634
Ritesh Harjani42876f42015-11-17 17:46:51 +05304635 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4636 goto defer_enable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05304637
Ritesh Harjani42876f42015-11-17 17:46:51 +05304638 sdhci_cfg_irq(host, true, true);
4639
4640defer_enable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004641 enable_irq(msm_host->pwr_irq);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004642
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004643 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4644 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004645 return 0;
4646}
4647
4648static int sdhci_msm_suspend(struct device *dev)
4649{
4650 struct sdhci_host *host = dev_get_drvdata(dev);
4651 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4652 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004653 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304654 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004655 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004656
4657 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4658 (msm_host->mmc->slot.cd_irq >= 0))
4659 disable_irq(msm_host->mmc->slot.cd_irq);
4660
4661 if (pm_runtime_suspended(dev)) {
4662 pr_debug("%s: %s: already runtime suspended\n",
4663 mmc_hostname(host->mmc), __func__);
4664 goto out;
4665 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004666 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004667out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304668
4669 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4670 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
4671 if (sdio_cfg)
4672 sdhci_cfg_irq(host, false, true);
4673 }
4674
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004675 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4676 ktime_to_us(ktime_sub(ktime_get(), start)));
4677 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004678}
4679
4680static int sdhci_msm_resume(struct device *dev)
4681{
4682 struct sdhci_host *host = dev_get_drvdata(dev);
4683 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4684 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4685 int ret = 0;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304686 int sdio_cfg = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004687 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004688
4689 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4690 (msm_host->mmc->slot.cd_irq >= 0))
4691 enable_irq(msm_host->mmc->slot.cd_irq);
4692
4693 if (pm_runtime_suspended(dev)) {
4694 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4695 mmc_hostname(host->mmc), __func__);
4696 goto out;
4697 }
4698
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004699 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004700out:
Ritesh Harjani42876f42015-11-17 17:46:51 +05304701 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4702 sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
4703 if (sdio_cfg)
4704 sdhci_cfg_irq(host, true, true);
4705 }
4706
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004707 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4708 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004709 return ret;
4710}
4711
Ritesh Harjani42876f42015-11-17 17:46:51 +05304712static int sdhci_msm_suspend_noirq(struct device *dev)
4713{
4714 struct sdhci_host *host = dev_get_drvdata(dev);
4715 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4716 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4717 int ret = 0;
4718
4719 /*
4720	 * ksdioirqd may still be running, hence retry the
4721	 * suspend if the clocks are still ON
4722 */
4723 if (atomic_read(&msm_host->clks_on)) {
4724 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
4725 mmc_hostname(host->mmc), __func__);
4726 ret = -EAGAIN;
4727 }
4728
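	/* An SDIO wakeup irq may still be pending processing; abort this suspend attempt */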
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05304729 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
4730 if (msm_host->sdio_pending_processing)
4731 ret = -EBUSY;
4732
Ritesh Harjani42876f42015-11-17 17:46:51 +05304733 return ret;
4734}
4735
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004736static const struct dev_pm_ops sdhci_msm_pmops = {
4737 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
4738 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
4739 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05304740 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004741};
4742
4743#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
4744
4745#else
4746#define SDHCI_MSM_PMOPS NULL
4747#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05304748static const struct of_device_id sdhci_msm_dt_match[] = {
4749 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304750 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07004751 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05304752};
4753MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4754
4755static struct platform_driver sdhci_msm_driver = {
4756 .probe = sdhci_msm_probe,
4757 .remove = sdhci_msm_remove,
4758 .driver = {
4759 .name = "sdhci_msm",
4760 .owner = THIS_MODULE,
4761 .of_match_table = sdhci_msm_dt_match,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004762 .pm = SDHCI_MSM_PMOPS,
Asutosh Das0ef24812012-12-18 16:14:02 +05304763 },
4764};
4765
4766module_platform_driver(sdhci_msm_driver);
4767
4768MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
4769MODULE_LICENSE("GPL v2");