blob: 831b64d54b2a0e0f6310682f20320cf8315bf2b1 [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Asutosh Das1c43b132018-01-11 18:08:40 +05305 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +053024#include <linux/of_device.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053025#include <linux/of_gpio.h>
26#include <linux/regulator/consumer.h>
27#include <linux/types.h>
28#include <linux/input.h>
29#include <linux/platform_device.h>
30#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070031#include <linux/io.h>
32#include <linux/delay.h>
33#include <linux/scatterlist.h>
34#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053035#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053036#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053037#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053038#include <linux/pinctrl/consumer.h>
39#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053040#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020041#include <linux/pm_runtime.h>
Asutosh Das1c43b132018-01-11 18:08:40 +053042#include <linux/nvmem-consumer.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020043#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053044
Sahitya Tummala56874732015-05-21 08:24:03 +053045#include "sdhci-msm.h"
Sahitya Tummala9325fb02015-05-08 11:53:29 +053046#include "sdhci-msm-ice.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070047#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053048
Asutosh Das36c2e922015-12-01 12:19:58 +053049#define QOS_REMOVE_DELAY_MS 10
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080050#define CORE_POWER 0x0
51#define CORE_SW_RST (1 << 7)
52
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070053#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080054
55#define CORE_VERSION_STEP_MASK 0x0000FFFF
56#define CORE_VERSION_MINOR_MASK 0x0FFF0000
57#define CORE_VERSION_MINOR_SHIFT 16
58#define CORE_VERSION_MAJOR_MASK 0xF0000000
59#define CORE_VERSION_MAJOR_SHIFT 28
60#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030061#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080062
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080063#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053064
65#define CORE_VERSION_MAJOR_MASK 0xF0000000
66#define CORE_VERSION_MAJOR_SHIFT 28
67
Asutosh Das0ef24812012-12-18 16:14:02 +053068#define CORE_HC_MODE 0x78
69#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070070#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053071
Asutosh Das0ef24812012-12-18 16:14:02 +053072#define CORE_PWRCTL_BUS_OFF 0x01
73#define CORE_PWRCTL_BUS_ON (1 << 1)
74#define CORE_PWRCTL_IO_LOW (1 << 2)
75#define CORE_PWRCTL_IO_HIGH (1 << 3)
76
77#define CORE_PWRCTL_BUS_SUCCESS 0x01
78#define CORE_PWRCTL_BUS_FAIL (1 << 1)
79#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
80#define CORE_PWRCTL_IO_FAIL (1 << 3)
81
82#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070083#define MAX_PHASES 16
84
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070085#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070086#define CORE_DLL_EN (1 << 16)
87#define CORE_CDR_EN (1 << 17)
88#define CORE_CK_OUT_EN (1 << 18)
89#define CORE_CDR_EXT_EN (1 << 19)
90#define CORE_DLL_PDN (1 << 29)
91#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070092
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070093#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -070094#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070095
Krishna Konda46fd1432014-10-30 21:13:27 -070096#define CORE_CLK_PWRSAVE (1 << 1)
97#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
98#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
99#define CORE_HC_MCLK_SEL_MASK (3 << 8)
100#define CORE_HC_AUTO_CMD21_EN (1 << 6)
101#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700102#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700103#define CORE_HC_SELECT_IN_EN (1 << 18)
104#define CORE_HC_SELECT_IN_HS400 (6 << 19)
105#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700106#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700107
Pavan Anamula691dd592015-08-25 16:11:20 +0530108#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
109#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530110#define CORE_ONE_MID_EN (1 << 25)
111
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530112#define CORE_8_BIT_SUPPORT (1 << 18)
113#define CORE_3_3V_SUPPORT (1 << 24)
114#define CORE_3_0V_SUPPORT (1 << 25)
115#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300116#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700117
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700118#define CORE_CSR_CDC_CTLR_CFG0 0x130
119#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
120#define CORE_HW_AUTOCAL_ENA (1 << 17)
121
122#define CORE_CSR_CDC_CTLR_CFG1 0x134
123#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
124#define CORE_TIMER_ENA (1 << 16)
125
126#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
127#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
128#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
129#define CORE_CDC_OFFSET_CFG 0x14C
130#define CORE_CSR_CDC_DELAY_CFG 0x150
131#define CORE_CDC_SLAVE_DDA_CFG 0x160
132#define CORE_CSR_CDC_STATUS0 0x164
133#define CORE_CALIBRATION_DONE (1 << 0)
134
135#define CORE_CDC_ERROR_CODE_MASK 0x7000000
136
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300137#define CQ_CMD_DBG_RAM 0x110
138#define CQ_CMD_DBG_RAM_WA 0x150
139#define CQ_CMD_DBG_RAM_OL 0x154
140
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700141#define CORE_CSR_CDC_GEN_CFG 0x178
142#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
143#define CORE_CDC_SWITCH_RC_EN (1 << 1)
144
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700145#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530146#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700147#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530148
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700149#define CORE_PWRSAVE_DLL (1 << 3)
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +0530150#define CORE_FIFO_ALT_EN (1 << 10)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530151#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700152
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700153#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800154#define CORE_FLL_CYCLE_CNT (1 << 18)
155#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700156
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530157#define DDR_CONFIG_POR_VAL 0x80040853
158#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
159#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700160#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700161
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700162/* 512 descriptors */
163#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530164#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530165
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700166#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800167#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700168
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700169#define INVALID_TUNING_PHASE -1
Ritesh Harjani42876f42015-11-17 17:46:51 +0530170#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700171
Krishna Konda96e6b112013-10-28 15:25:03 -0700172#define NUM_TUNING_PHASES 16
Talel Shenhar6f0f3412015-11-08 14:21:31 +0200173#define MAX_DRV_TYPES_SUPPORTED_HS200 4
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200174#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700175
/*
 * Register-offset map for the vendor-specific (non-standard-SDHCI) registers
 * of the MSM SDCC core. Two instances exist below: one for controller
 * versions where the legacy MCI register block has been removed (offsets
 * live in the SDHCI register space) and one for older versions where they
 * live in the separate "core_mem" region.
 */
struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;	/* bit position, not a register offset */
	u32 CORE_TESTBUS_ENA;		/* bit mask, not a register offset */
	u32 CORE_TESTBUS_SEL2;		/* bit mask, not a register offset */
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
};
204
/* Offsets for controller versions with the legacy MCI block removed. */
struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
};
233
/* Offsets for older controller versions where the MCI block is present. */
struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};
262
263u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
264{
265 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
266 struct sdhci_msm_host *msm_host = pltfm_host->priv;
267 void __iomem *base_addr;
268
269 if (msm_host->mci_removed)
270 base_addr = host->ioaddr;
271 else
272 base_addr = msm_host->core_mem;
273
274 return readb_relaxed(base_addr + offset);
275}
276
277u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
278{
279 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
280 struct sdhci_msm_host *msm_host = pltfm_host->priv;
281 void __iomem *base_addr;
282
283 if (msm_host->mci_removed)
284 base_addr = host->ioaddr;
285 else
286 base_addr = msm_host->core_mem;
287
288 return readl_relaxed(base_addr + offset);
289}
290
291void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
292{
293 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
294 struct sdhci_msm_host *msm_host = pltfm_host->priv;
295 void __iomem *base_addr;
296
297 if (msm_host->mci_removed)
298 base_addr = host->ioaddr;
299 else
300 base_addr = msm_host->core_mem;
301
302 writeb_relaxed(val, base_addr + offset);
303}
304
305void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
306{
307 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
308 struct sdhci_msm_host *msm_host = pltfm_host->priv;
309 void __iomem *base_addr;
310
311 if (msm_host->mci_removed)
312 base_addr = host->ioaddr;
313 else
314 base_addr = msm_host->core_mem;
315
316 writel_relaxed(val, base_addr + offset);
317}
318
Ritesh Harjani82124772014-11-04 15:34:00 +0530319/* Timeout value to avoid infinite waiting for pwr_irq */
320#define MSM_PWR_IRQ_TIMEOUT_MS 5000
321
/*
 * 64-byte tuning pattern sent by the card during tuning (used for 4-bit
 * bus widths). Presumably matches the spec-defined tuning block — verify
 * against the SD3.0/eMMC tuning sections if modified.
 */
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};
328
/*
 * 128-byte tuning pattern (used for 8-bit bus widths). Presumably matches
 * the spec-defined tuning block — verify against the eMMC HS200 tuning
 * section if modified.
 */
static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};
Asutosh Das0ef24812012-12-18 16:14:02 +0530339
/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

/* Bitmask-style module parameter to disable slots at probe time. */
static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

/* NOTE(review): presumably forces non-CMDQ operation — confirm in probe. */
static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
349
/* Selects which voltage level to program on the VDD-IO (pad) regulator. */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};
361
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700362/* MSM platform specific tuning */
363static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
364 u8 poll)
365{
366 int rc = 0;
367 u32 wait_cnt = 50;
368 u8 ck_out_en = 0;
369 struct mmc_host *mmc = host->mmc;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530370 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
371 struct sdhci_msm_host *msm_host = pltfm_host->priv;
372 const struct sdhci_msm_offset *msm_host_offset =
373 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700374
375 /* poll for CK_OUT_EN bit. max. poll time = 50us */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530376 ck_out_en = !!(readl_relaxed(host->ioaddr +
377 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700378
379 while (ck_out_en != poll) {
380 if (--wait_cnt == 0) {
381 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
382 mmc_hostname(mmc), __func__, poll);
383 rc = -ETIMEDOUT;
384 goto out;
385 }
386 udelay(1);
387
388 ck_out_en = !!(readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530389 msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700390 }
391out:
392 return rc;
393}
394
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530395/*
396 * Enable CDR to track changes of DAT lines and adjust sampling
397 * point according to voltage/temperature variations
398 */
399static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
400{
401 int rc = 0;
402 u32 config;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530403 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
404 struct sdhci_msm_host *msm_host = pltfm_host->priv;
405 const struct sdhci_msm_offset *msm_host_offset =
406 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530407
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530408 config = readl_relaxed(host->ioaddr +
409 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530410 config |= CORE_CDR_EN;
411 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530412 writel_relaxed(config, host->ioaddr +
413 msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530414
415 rc = msm_dll_poll_ck_out_en(host, 0);
416 if (rc)
417 goto err;
418
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530419 writel_relaxed((readl_relaxed(host->ioaddr +
420 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
421 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530422
423 rc = msm_dll_poll_ck_out_en(host, 1);
424 if (rc)
425 goto err;
426 goto out;
427err:
428 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
429out:
430 return rc;
431}
432
433static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
434 *attr, const char *buf, size_t count)
435{
436 struct sdhci_host *host = dev_get_drvdata(dev);
437 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
438 struct sdhci_msm_host *msm_host = pltfm_host->priv;
439 u32 tmp;
440 unsigned long flags;
441
442 if (!kstrtou32(buf, 0, &tmp)) {
443 spin_lock_irqsave(&host->lock, flags);
444 msm_host->en_auto_cmd21 = !!tmp;
445 spin_unlock_irqrestore(&host->lock, flags);
446 }
447 return count;
448}
449
450static ssize_t show_auto_cmd21(struct device *dev,
451 struct device_attribute *attr, char *buf)
452{
453 struct sdhci_host *host = dev_get_drvdata(dev);
454 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
455 struct sdhci_msm_host *msm_host = pltfm_host->priv;
456
457 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
458}
459
460/* MSM auto-tuning handler */
461static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
462 bool enable,
463 u32 type)
464{
465 int rc = 0;
466 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
467 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530468 const struct sdhci_msm_offset *msm_host_offset =
469 msm_host->offset;
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530470 u32 val = 0;
471
472 if (!msm_host->en_auto_cmd21)
473 return 0;
474
475 if (type == MMC_SEND_TUNING_BLOCK_HS200)
476 val = CORE_HC_AUTO_CMD21_EN;
477 else
478 return 0;
479
480 if (enable) {
481 rc = msm_enable_cdr_cm_sdc4_dll(host);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530482 writel_relaxed(readl_relaxed(host->ioaddr +
483 msm_host_offset->CORE_VENDOR_SPEC) | val,
484 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530485 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530486 writel_relaxed(readl_relaxed(host->ioaddr +
487 msm_host_offset->CORE_VENDOR_SPEC) & ~val,
488 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530489 }
490 return rc;
491}
492
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700493static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
494{
495 int rc = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530496 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
497 struct sdhci_msm_host *msm_host = pltfm_host->priv;
498 const struct sdhci_msm_offset *msm_host_offset =
499 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700500 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
501 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
502 0x8};
503 unsigned long flags;
504 u32 config;
505 struct mmc_host *mmc = host->mmc;
506
507 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
508 spin_lock_irqsave(&host->lock, flags);
509
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530510 config = readl_relaxed(host->ioaddr +
511 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700512 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
513 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530514 writel_relaxed(config, host->ioaddr +
515 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700516
517 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
518 rc = msm_dll_poll_ck_out_en(host, 0);
519 if (rc)
520 goto err_out;
521
522 /*
523 * Write the selected DLL clock output phase (0 ... 15)
524 * to CDR_SELEXT bit field of DLL_CONFIG register.
525 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530526 writel_relaxed(((readl_relaxed(host->ioaddr +
527 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700528 & ~(0xF << 20))
529 | (grey_coded_phase_table[phase] << 20)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530530 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700531
532 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530533 writel_relaxed((readl_relaxed(host->ioaddr +
534 msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
535 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700536
537 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
538 rc = msm_dll_poll_ck_out_en(host, 1);
539 if (rc)
540 goto err_out;
541
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530542 config = readl_relaxed(host->ioaddr +
543 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700544 config |= CORE_CDR_EN;
545 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530546 writel_relaxed(config, host->ioaddr +
547 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700548 goto out;
549
550err_out:
551 pr_err("%s: %s: Failed to set DLL phase: %d\n",
552 mmc_hostname(mmc), __func__, phase);
553out:
554 spin_unlock_irqrestore(&host->lock, flags);
555 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
556 return rc;
557}
558
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 *
 * @phase_table: sorted list of phases (0..15) that passed tuning
 * @total_phases: number of valid entries in @phase_table
 * Returns the chosen phase (0..15) or -EINVAL on malformed input.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[r][c]: c-th phase of the r-th run of consecutive phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Split the phase table into rows of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the widest window */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase at the 3/4 point of the selected window */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
672
673static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
674{
675 u32 mclk_freq = 0;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530676 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
677 struct sdhci_msm_host *msm_host = pltfm_host->priv;
678 const struct sdhci_msm_offset *msm_host_offset =
679 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700680
681 /* Program the MCLK value to MCLK_FREQ bit field */
682 if (host->clock <= 112000000)
683 mclk_freq = 0;
684 else if (host->clock <= 125000000)
685 mclk_freq = 1;
686 else if (host->clock <= 137000000)
687 mclk_freq = 2;
688 else if (host->clock <= 150000000)
689 mclk_freq = 3;
690 else if (host->clock <= 162000000)
691 mclk_freq = 4;
692 else if (host->clock <= 175000000)
693 mclk_freq = 5;
694 else if (host->clock <= 187000000)
695 mclk_freq = 6;
Subhash Jadavanib3235262017-07-19 16:56:04 -0700696 else if (host->clock <= 208000000)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700697 mclk_freq = 7;
698
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530699 writel_relaxed(((readl_relaxed(host->ioaddr +
700 msm_host_offset->CORE_DLL_CONFIG)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700701 & ~(7 << 24)) | (mclk_freq << 24)),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530702 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700703}
704
705/* Initialize the DLL (Programmable Delay Line ) */
706static int msm_init_cm_dll(struct sdhci_host *host)
707{
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800708 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
709 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530710 const struct sdhci_msm_offset *msm_host_offset =
711 msm_host->offset;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700712 struct mmc_host *mmc = host->mmc;
713 int rc = 0;
714 unsigned long flags;
715 u32 wait_cnt;
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530716 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700717
718 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
719 spin_lock_irqsave(&host->lock, flags);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530720 prev_pwrsave = !!(readl_relaxed(host->ioaddr +
721 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530722 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700723 /*
724 * Make sure that clock is always enabled when DLL
725 * tuning is in progress. Keeping PWRSAVE ON may
726 * turn off the clock. So let's disable the PWRSAVE
727 * here and re-enable it once tuning is completed.
728 */
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530729 if (prev_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530730 writel_relaxed((readl_relaxed(host->ioaddr +
731 msm_host_offset->CORE_VENDOR_SPEC)
732 & ~CORE_CLK_PWRSAVE), host->ioaddr +
733 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530734 curr_pwrsave = false;
735 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700736
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800737 if (msm_host->use_updated_dll_reset) {
738 /* Disable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530739 writel_relaxed((readl_relaxed(host->ioaddr +
740 msm_host_offset->CORE_DLL_CONFIG)
741 & ~CORE_CK_OUT_EN), host->ioaddr +
742 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800743
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530744 writel_relaxed((readl_relaxed(host->ioaddr +
745 msm_host_offset->CORE_DLL_CONFIG_2)
746 | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
747 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800748 }
749
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700750 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530751 writel_relaxed((readl_relaxed(host->ioaddr +
752 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
753 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700754
755 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530756 writel_relaxed((readl_relaxed(host->ioaddr +
757 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
758 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700759 msm_cm_dll_set_freq(host);
760
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800761 if (msm_host->use_updated_dll_reset) {
762 u32 mclk_freq = 0;
763
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530764 if ((readl_relaxed(host->ioaddr +
765 msm_host_offset->CORE_DLL_CONFIG_2)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800766 & CORE_FLL_CYCLE_CNT))
767 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
768 else
769 mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
770
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530771 writel_relaxed(((readl_relaxed(host->ioaddr +
772 msm_host_offset->CORE_DLL_CONFIG_2)
773 & ~(0xFF << 10)) | (mclk_freq << 10)),
774 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800775 /* wait for 5us before enabling DLL clock */
776 udelay(5);
777 }
778
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700779 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530780 writel_relaxed((readl_relaxed(host->ioaddr +
781 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
782 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700783
784 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530785 writel_relaxed((readl_relaxed(host->ioaddr +
786 msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
787 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700788
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800789 if (msm_host->use_updated_dll_reset) {
790 msm_cm_dll_set_freq(host);
791 /* Enable the DLL clock */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530792 writel_relaxed((readl_relaxed(host->ioaddr +
793 msm_host_offset->CORE_DLL_CONFIG_2)
794 & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
795 msm_host_offset->CORE_DLL_CONFIG_2);
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800796 }
797
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700798 /* Set DLL_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530799 writel_relaxed((readl_relaxed(host->ioaddr +
800 msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
801 host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700802
803 /* Set CK_OUT_EN bit to 1. */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530804 writel_relaxed((readl_relaxed(host->ioaddr +
805 msm_host_offset->CORE_DLL_CONFIG)
806 | CORE_CK_OUT_EN), host->ioaddr +
807 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700808
809 wait_cnt = 50;
810 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530811 while (!(readl_relaxed(host->ioaddr +
812 msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700813 /* max. wait for 50us sec for LOCK bit to be set */
814 if (--wait_cnt == 0) {
815 pr_err("%s: %s: DLL failed to LOCK\n",
816 mmc_hostname(mmc), __func__);
817 rc = -ETIMEDOUT;
818 goto out;
819 }
820 /* wait for 1us before polling again */
821 udelay(1);
822 }
823
824out:
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530825 /* Restore the correct PWRSAVE state */
826 if (prev_pwrsave ^ curr_pwrsave) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530827 u32 reg = readl_relaxed(host->ioaddr +
828 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530829
830 if (prev_pwrsave)
831 reg |= CORE_CLK_PWRSAVE;
832 else
833 reg &= ~CORE_CLK_PWRSAVE;
834
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +0530835 writel_relaxed(reg, host->ioaddr +
836 msm_host_offset->CORE_VENDOR_SPEC);
Subhash Jadavani99bca3b2013-05-28 18:21:57 +0530837 }
838
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700839 spin_unlock_irqrestore(&host->lock, flags);
840 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
841 return rc;
842}
843
/*
 * sdhci_msm_cdclp533_calibration - run the CDCLP533 (CDC) DLL HW calibration
 * @host: SDHCI host to calibrate
 *
 * Programs the Calibrated Delay Chain registers, triggers a software
 * calibration pulse followed by HW auto-calibration, then polls
 * CALIBRATION_DONE and checks the CDC error code. Called from the HS400
 * DLL calibration path.
 *
 * Returns 0 on success, -ETIMEDOUT if CALIBRATION_DONE is not seen within
 * the poll window, or -EINVAL if the CDC reports a non-zero error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Order all the relaxed register writes above before polling */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
956
/*
 * sdhci_msm_cm_dll_sdc4_calibration - calibrate the CM DLL SDC4 HW block
 * @host: SDHCI host to calibrate
 *
 * Reprograms the RCLK delay configuration (choosing between the DT-provided
 * value, the POR value, or a patched legacy value depending on platform
 * flags), optionally routes RCLK onto the CMD line for enhanced strobe,
 * triggers DDR calibration and polls for DDR_DLL_LOCK. On non-14lpp DLLs
 * it finally enables PWRSAVE_DLL.
 *
 * Returns 0 on success or -ETIMEDOUT if the DLL fails to lock.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->pdata->rclk_wa) {
		/* RCLK workaround: use the board-specific value from DT */
		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else if (msm_host->rclk_delay_fix) {
		/* Fixed-delay parts: restore the power-on-reset value */
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		/* Legacy parts: POR value with the PRG_RCLK_DLY field patched */
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	/* For enhanced strobe capable cards, feed RCLK in on the CMD line */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN), host->ioaddr +
				msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		 msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	/* Make sure all register writes have completed before returning */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
1028
Ritesh Harjaniea709662015-05-27 15:40:24 +05301029static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
1030{
1031 int ret = 0;
1032 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1033 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1034 struct mmc_host *mmc = host->mmc;
1035
1036 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
1037
Ritesh Harjani70e2a712015-08-25 11:34:16 +05301038 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
1039 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +05301040 mmc_hostname(mmc));
1041 return -EINVAL;
1042 }
1043
1044 if (msm_host->calibration_done ||
1045 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
1046 return 0;
1047 }
1048
1049 /*
1050 * Reset the tuning block.
1051 */
1052 ret = msm_init_cm_dll(host);
1053 if (ret)
1054 goto out;
1055
1056 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
1057out:
1058 if (!ret)
1059 msm_host->calibration_done = true;
1060 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
1061 __func__, ret);
1062 return ret;
1063}
1064
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001065static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
1066{
1067 int ret = 0;
1068 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1069 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05301070 const struct sdhci_msm_offset *msm_host_offset =
1071 msm_host->offset;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001072
1073 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
1074
1075 /*
1076 * Retuning in HS400 (DDR mode) will fail, just reset the
1077 * tuning block and restore the saved tuning phase.
1078 */
1079 ret = msm_init_cm_dll(host);
1080 if (ret)
1081 goto out;
1082
1083 /* Set the selected phase in delay line hw block */
1084 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
1085 if (ret)
1086 goto out;
1087
Krishna Konda0e8efba2014-06-23 14:50:38 -07001088 /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05301089 writel_relaxed((readl_relaxed(host->ioaddr +
1090 msm_host_offset->CORE_DLL_CONFIG)
1091 | CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
1092 msm_host_offset->CORE_DLL_CONFIG);
Krishna Konda0e8efba2014-06-23 14:50:38 -07001093
Krishna Konda2faa7bb2014-06-04 01:25:16 -07001094 if (msm_host->use_cdclp533)
1095 /* Calibrate CDCLP533 DLL HW */
1096 ret = sdhci_msm_cdclp533_calibration(host);
1097 else
1098 /* Calibrate CM_DLL_SDC4 HW */
1099 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
1100out:
1101 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
1102 __func__, ret);
1103 return ret;
1104}
1105
Krishna Konda96e6b112013-10-28 15:25:03 -07001106static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
1107 u8 drv_type)
1108{
1109 struct mmc_command cmd = {0};
1110 struct mmc_request mrq = {NULL};
1111 struct mmc_host *mmc = host->mmc;
1112 u8 val = ((drv_type << 4) | 2);
1113
1114 cmd.opcode = MMC_SWITCH;
1115 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1116 (EXT_CSD_HS_TIMING << 16) |
1117 (val << 8) |
1118 EXT_CSD_CMD_SET_NORMAL;
1119 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
1120 /* 1 sec */
1121 cmd.busy_timeout = 1000 * 1000;
1122
1123 memset(cmd.resp, 0, sizeof(cmd.resp));
1124 cmd.retries = 3;
1125
1126 mrq.cmd = &cmd;
1127 cmd.data = NULL;
1128
1129 mmc_wait_for_req(mmc, &mrq);
1130 pr_debug("%s: %s: set card drive type to %d\n",
1131 mmc_hostname(mmc), __func__,
1132 drv_type);
1133}
1134
/*
 * sdhci_msm_execute_tuning - find and program the best CM DLL phase
 * @host: SDHCI host being tuned
 * @opcode: tuning command opcode (e.g. MMC_SEND_TUNING_BLOCK_HS200)
 *
 * Tries all 16 DLL phases, reading the tuning block pattern at each one,
 * and programs the most appropriate passing phase. For HS400 with tuning
 * already done, only the DLL calibration is (re)run. If every phase
 * passes on an eMMC card, the card's drive strength is changed and the
 * sequence retried until at least one phase fails (up to
 * MAX_DRV_TYPES_SUPPORTED_HS200 drive types). The whole sequence is
 * retried up to 3 times before giving up with -EIO.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO if no tuning
 * point is found, or the error from the DLL config/calibration helpers.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;
	u8 last_good_phase = 0;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* HS200 in 8-bit mode uses the longer 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			/*
			 * Set the dll to last known good phase while sending
			 * status command to ensure that status command won't
			 * fail due to bad phase.
			 */
			if (tuned_phase_cnt)
				last_good_phase =
					tuned_phases[tuned_phase_cnt-1];
			else if (msm_host->saved_tuning_phase !=
					INVALID_TUNING_PHASE)
				last_good_phase = msm_host->saved_tuning_phase;

			rc = msm_config_cm_dll_phase(host, last_good_phase);
			if (rc)
				goto kfree;

			/* Wait for the card to return to TRAN state */
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				    (R1_CURRENT_STATE(sts_cmd.resp[0])
				    != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			/* Ignore crc errors occurred during tuning */
			if (cmd.error)
				mmc->err_stats[MMC_ERR_CMD_CRC]--;
			else if (data.error)
				mmc->err_stats[MMC_ERR_DAT_CRC]--;
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1371
Asutosh Das0ef24812012-12-18 16:14:02 +05301372static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1373{
1374 struct sdhci_msm_gpio_data *curr;
1375 int i, ret = 0;
1376
1377 curr = pdata->pin_data->gpio_data;
1378 for (i = 0; i < curr->size; i++) {
1379 if (!gpio_is_valid(curr->gpio[i].no)) {
1380 ret = -EINVAL;
1381 pr_err("%s: Invalid gpio = %d\n", __func__,
1382 curr->gpio[i].no);
1383 goto free_gpios;
1384 }
1385 if (enable) {
1386 ret = gpio_request(curr->gpio[i].no,
1387 curr->gpio[i].name);
1388 if (ret) {
1389 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1390 __func__, curr->gpio[i].no,
1391 curr->gpio[i].name, ret);
1392 goto free_gpios;
1393 }
1394 curr->gpio[i].is_enabled = true;
1395 } else {
1396 gpio_free(curr->gpio[i].no);
1397 curr->gpio[i].is_enabled = false;
1398 }
1399 }
1400 return ret;
1401
1402free_gpios:
1403 for (i--; i >= 0; i--) {
1404 gpio_free(curr->gpio[i].no);
1405 curr->gpio[i].is_enabled = false;
1406 }
1407 return ret;
1408}
1409
Can Guob903ad82017-10-17 13:22:53 +08001410static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
1411 unsigned int clock)
1412{
1413 int ret = 0;
1414
1415 if (clock > 150000000) {
1416 if (pdata->pctrl_data->pins_drv_type_200MHz)
1417 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1418 pdata->pctrl_data->pins_drv_type_200MHz);
1419 } else if (clock > 75000000) {
1420 if (pdata->pctrl_data->pins_drv_type_100MHz)
1421 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1422 pdata->pctrl_data->pins_drv_type_100MHz);
1423 } else if (clock > 400000) {
1424 if (pdata->pctrl_data->pins_drv_type_50MHz)
1425 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1426 pdata->pctrl_data->pins_drv_type_50MHz);
1427 } else {
1428 if (pdata->pctrl_data->pins_drv_type_400KHz)
1429 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1430 pdata->pctrl_data->pins_drv_type_400KHz);
1431 }
1432
1433 return ret;
1434}
1435
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301436static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1437 bool enable)
1438{
1439 int ret = 0;
1440
1441 if (enable)
1442 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1443 pdata->pctrl_data->pins_active);
1444 else
1445 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1446 pdata->pctrl_data->pins_sleep);
1447
1448 if (ret < 0)
1449 pr_err("%s state for pinctrl failed with %d\n",
1450 enable ? "Enabling" : "Disabling", ret);
1451
1452 return ret;
1453}
1454
Asutosh Das0ef24812012-12-18 16:14:02 +05301455static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1456{
1457 int ret = 0;
1458
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301459 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301460 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301461 } else if (pdata->pctrl_data) {
1462 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1463 goto out;
1464 } else if (!pdata->pin_data) {
1465 return 0;
1466 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301467
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301468 if (pdata->pin_data->is_gpio)
1469 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301470out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301471 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301472 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301473
1474 return ret;
1475}
1476
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301477static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1478 u32 **out, int *len, u32 size)
1479{
1480 int ret = 0;
1481 struct device_node *np = dev->of_node;
1482 size_t sz;
1483 u32 *arr = NULL;
1484
1485 if (!of_get_property(np, prop_name, len)) {
1486 ret = -EINVAL;
1487 goto out;
1488 }
1489 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001490 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301491 dev_err(dev, "%s invalid size\n", prop_name);
1492 ret = -EINVAL;
1493 goto out;
1494 }
1495
1496 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1497 if (!arr) {
1498 dev_err(dev, "%s failed allocating memory\n", prop_name);
1499 ret = -ENOMEM;
1500 goto out;
1501 }
1502
1503 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1504 if (ret < 0) {
1505 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1506 goto out;
1507 }
1508 *out = arr;
1509out:
1510 if (ret)
1511 *len = 0;
1512 return ret;
1513}
1514
Asutosh Das0ef24812012-12-18 16:14:02 +05301515#define MAX_PROP_SIZE 32
1516static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1517 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1518{
1519 int len, ret = 0;
1520 const __be32 *prop;
1521 char prop_name[MAX_PROP_SIZE];
1522 struct sdhci_msm_reg_data *vreg;
1523 struct device_node *np = dev->of_node;
1524
1525 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1526 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301527 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301528 return ret;
1529 }
1530
1531 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1532 if (!vreg) {
1533 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1534 ret = -ENOMEM;
1535 return ret;
1536 }
1537
1538 vreg->name = vreg_name;
1539
1540 snprintf(prop_name, MAX_PROP_SIZE,
1541 "qcom,%s-always-on", vreg_name);
1542 if (of_get_property(np, prop_name, NULL))
1543 vreg->is_always_on = true;
1544
1545 snprintf(prop_name, MAX_PROP_SIZE,
1546 "qcom,%s-lpm-sup", vreg_name);
1547 if (of_get_property(np, prop_name, NULL))
1548 vreg->lpm_sup = true;
1549
1550 snprintf(prop_name, MAX_PROP_SIZE,
1551 "qcom,%s-voltage-level", vreg_name);
1552 prop = of_get_property(np, prop_name, &len);
1553 if (!prop || (len != (2 * sizeof(__be32)))) {
1554 dev_warn(dev, "%s %s property\n",
1555 prop ? "invalid format" : "no", prop_name);
1556 } else {
1557 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1558 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1559 }
1560
1561 snprintf(prop_name, MAX_PROP_SIZE,
1562 "qcom,%s-current-level", vreg_name);
1563 prop = of_get_property(np, prop_name, &len);
1564 if (!prop || (len != (2 * sizeof(__be32)))) {
1565 dev_warn(dev, "%s %s property\n",
1566 prop ? "invalid format" : "no", prop_name);
1567 } else {
1568 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1569 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1570 }
1571
1572 *vreg_data = vreg;
1573 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1574 vreg->name, vreg->is_always_on ? "always_on," : "",
1575 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1576 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1577
1578 return ret;
1579}
1580
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301581static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1582 struct sdhci_msm_pltfm_data *pdata)
1583{
1584 struct sdhci_pinctrl_data *pctrl_data;
1585 struct pinctrl *pctrl;
1586 int ret = 0;
1587
1588 /* Try to obtain pinctrl handle */
1589 pctrl = devm_pinctrl_get(dev);
1590 if (IS_ERR(pctrl)) {
1591 ret = PTR_ERR(pctrl);
1592 goto out;
1593 }
1594 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1595 if (!pctrl_data) {
1596 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1597 ret = -ENOMEM;
1598 goto out;
1599 }
1600 pctrl_data->pctrl = pctrl;
1601 /* Look-up and keep the states handy to be used later */
1602 pctrl_data->pins_active = pinctrl_lookup_state(
1603 pctrl_data->pctrl, "active");
1604 if (IS_ERR(pctrl_data->pins_active)) {
1605 ret = PTR_ERR(pctrl_data->pins_active);
1606 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1607 goto out;
1608 }
1609 pctrl_data->pins_sleep = pinctrl_lookup_state(
1610 pctrl_data->pctrl, "sleep");
1611 if (IS_ERR(pctrl_data->pins_sleep)) {
1612 ret = PTR_ERR(pctrl_data->pins_sleep);
1613 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1614 goto out;
1615 }
Can Guob903ad82017-10-17 13:22:53 +08001616
1617 pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
1618 pctrl_data->pctrl, "ds_400KHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301619 if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001620 dev_dbg(dev, "Could not get 400K pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301621 pctrl_data->pins_drv_type_400KHz = NULL;
1622 }
Can Guob903ad82017-10-17 13:22:53 +08001623
1624 pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
1625 pctrl_data->pctrl, "ds_50MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301626 if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001627 dev_dbg(dev, "Could not get 50M pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301628 pctrl_data->pins_drv_type_50MHz = NULL;
1629 }
Can Guob903ad82017-10-17 13:22:53 +08001630
1631 pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
1632 pctrl_data->pctrl, "ds_100MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301633 if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001634 dev_dbg(dev, "Could not get 100M pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301635 pctrl_data->pins_drv_type_100MHz = NULL;
1636 }
Can Guob903ad82017-10-17 13:22:53 +08001637
1638 pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
1639 pctrl_data->pctrl, "ds_200MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301640 if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001641 dev_dbg(dev, "Could not get 200M pinstates, err:%d\n", ret);
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301642 pctrl_data->pins_drv_type_200MHz = NULL;
1643 }
Can Guob903ad82017-10-17 13:22:53 +08001644
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301645 pdata->pctrl_data = pctrl_data;
1646out:
1647 return ret;
1648}
1649
Asutosh Das0ef24812012-12-18 16:14:02 +05301650#define GPIO_NAME_MAX_LEN 32
1651static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1652 struct sdhci_msm_pltfm_data *pdata)
1653{
1654 int ret = 0, cnt, i;
1655 struct sdhci_msm_pin_data *pin_data;
1656 struct device_node *np = dev->of_node;
1657
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301658 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1659 if (!ret) {
1660 goto out;
1661 } else if (ret == -EPROBE_DEFER) {
1662 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1663 goto out;
1664 } else {
1665 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1666 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301667 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301668 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301669 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1670 if (!pin_data) {
1671 dev_err(dev, "No memory for pin_data\n");
1672 ret = -ENOMEM;
1673 goto out;
1674 }
1675
1676 cnt = of_gpio_count(np);
1677 if (cnt > 0) {
1678 pin_data->gpio_data = devm_kzalloc(dev,
1679 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1680 if (!pin_data->gpio_data) {
1681 dev_err(dev, "No memory for gpio_data\n");
1682 ret = -ENOMEM;
1683 goto out;
1684 }
1685 pin_data->gpio_data->size = cnt;
1686 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1687 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1688
1689 if (!pin_data->gpio_data->gpio) {
1690 dev_err(dev, "No memory for gpio\n");
1691 ret = -ENOMEM;
1692 goto out;
1693 }
1694
1695 for (i = 0; i < cnt; i++) {
1696 const char *name = NULL;
1697 char result[GPIO_NAME_MAX_LEN];
1698 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1699 of_property_read_string_index(np,
1700 "qcom,gpio-names", i, &name);
1701
1702 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1703 dev_name(dev), name ? name : "?");
1704 pin_data->gpio_data->gpio[i].name = result;
1705 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1706 pin_data->gpio_data->gpio[i].name,
1707 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301708 }
1709 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301710 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301711out:
1712 if (ret)
1713 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1714 return ret;
1715}
1716
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001717#ifdef CONFIG_SMP
1718static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1719{
1720 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1721}
1722#else
1723static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1724#endif
1725
Gilad Bronerc788a672015-09-08 15:39:11 +03001726static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1727 struct sdhci_msm_pltfm_data *pdata)
1728{
1729 struct device_node *np = dev->of_node;
1730 const char *str;
1731 u32 cpu;
1732 int ret = 0;
1733 int i;
1734
1735 pdata->pm_qos_data.irq_valid = false;
1736 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1737 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1738 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001739 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001740 }
1741
1742 /* must specify cpu for "affine_cores" type */
1743 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1744 pdata->pm_qos_data.irq_cpu = -1;
1745 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1746 if (ret) {
1747 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1748 ret);
1749 goto out;
1750 }
1751 if (cpu < 0 || cpu >= num_possible_cpus()) {
1752 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1753 __func__, cpu, num_possible_cpus());
1754 ret = -EINVAL;
1755 goto out;
1756 }
1757 pdata->pm_qos_data.irq_cpu = cpu;
1758 }
1759
1760 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1761 SDHCI_POWER_POLICY_NUM) {
1762 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1763 __func__, SDHCI_POWER_POLICY_NUM);
1764 ret = -EINVAL;
1765 goto out;
1766 }
1767
1768 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1769 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1770 &pdata->pm_qos_data.irq_latency.latency[i]);
1771
1772 pdata->pm_qos_data.irq_valid = true;
1773out:
1774 return ret;
1775}
1776
/*
 * sdhci_msm_pm_qos_parse_cpu_groups - parse "qcom,pm-qos-cpu-groups"
 * @dev: device whose of_node is parsed
 * @pdata: platform data receiving the group map
 *
 * Each u32 element of the property is a CPU bitmask defining one
 * voting group. Returns 0 on success; on error the mask array is
 * freed and a negative errno is returned (the caller then disables
 * CPU-group voting).
 */
static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
		struct sdhci_msm_pltfm_data *pdata)
{
	struct device_node *np = dev->of_node;
	u32 mask;
	int nr_groups;
	int ret;
	int i;

	/* Read cpu group mapping */
	nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
	if (nr_groups <= 0) {
		ret = -EINVAL;
		goto out;
	}
	pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
	pdata->pm_qos_data.cpu_group_map.mask =
		kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
	if (!pdata->pm_qos_data.cpu_group_map.mask) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_groups; i++) {
		of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
			i, &mask);

		/*
		 * Only the first word of the (kcalloc-zeroed) cpumask is
		 * populated, so a group can span at most BITS_PER_LONG
		 * CPUs here.
		 */
		pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
		/* every CPU named in the group must actually be possible */
		if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
			cpu_possible_mask)) {
			dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
				__func__, mask, i);
			ret = -EINVAL;
			goto free_res;
		}
	}
	return 0;

free_res:
	kfree(pdata->pm_qos_data.cpu_group_map.mask);
out:
	return ret;
}
1820
1821static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1822 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1823{
1824 struct device_node *np = dev->of_node;
1825 struct sdhci_msm_pm_qos_latency *values;
1826 int ret;
1827 int i;
1828 int group;
1829 int cfg;
1830
1831 ret = of_property_count_u32_elems(np, name);
1832 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1833 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1834 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1835 ret);
1836 return -EINVAL;
1837 } else if (ret < 0) {
1838 return ret;
1839 }
1840
1841 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1842 GFP_KERNEL);
1843 if (!values)
1844 return -ENOMEM;
1845
1846 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1847 group = i / SDHCI_POWER_POLICY_NUM;
1848 cfg = i % SDHCI_POWER_POLICY_NUM;
1849 of_property_read_u32_index(np, name, i,
1850 &(values[group].latency[cfg]));
1851 }
1852
1853 *latency = values;
1854 return 0;
1855}
1856
/*
 * sdhci_msm_pm_qos_parse - top-level PM QoS DT parsing
 * @dev: device whose of_node is parsed
 * @pdata: platform data receiving the results
 *
 * IRQ voting and CPU-group voting are each optional: a failure in
 * either parser only disables that voting path (with a notice), it is
 * never propagated to the caller.
 */
static void sdhci_msm_pm_qos_parse(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata)
{
	if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
		dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
			__func__);

	if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
		/* each latency table counts as valid only if fully parsed */
		pdata->pm_qos_data.cmdq_valid =
			!sdhci_msm_pm_qos_parse_latency(dev,
				"qcom,pm-qos-cmdq-latency-us",
				pdata->pm_qos_data.cpu_group_map.nr_groups,
				&pdata->pm_qos_data.cmdq_latency);
		pdata->pm_qos_data.legacy_valid =
			!sdhci_msm_pm_qos_parse_latency(dev,
				"qcom,pm-qos-legacy-latency-us",
				pdata->pm_qos_data.cpu_group_map.nr_groups,
				&pdata->pm_qos_data.latency);
		if (!pdata->pm_qos_data.cmdq_valid &&
			!pdata->pm_qos_data.legacy_valid) {
			/* clean-up previously allocated arrays */
			kfree(pdata->pm_qos_data.latency);
			kfree(pdata->pm_qos_data.cmdq_latency);
			dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
				__func__);
		}
	} else {
		dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
			__func__);
	}
}
1888
Asutosh Das1c43b132018-01-11 18:08:40 +05301889#ifdef CONFIG_NVMEM
1890/* Parse qfprom data for deciding on errata work-arounds */
1891static long qfprom_read(struct device *dev, const char *name)
1892{
1893 struct nvmem_cell *cell;
1894 ssize_t len = 0;
1895 u32 *buf, val = 0;
1896 long err = 0;
1897
1898 cell = nvmem_cell_get(dev, name);
1899 if (IS_ERR(cell)) {
1900 err = PTR_ERR(cell);
1901 dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
1902 /* If entry does not exist, then that is not an error */
1903 if (err == -ENOENT)
1904 err = 0;
1905 return err;
1906 }
1907
1908 buf = (u32 *)nvmem_cell_read(cell, &len);
1909 if (IS_ERR(buf) || !len) {
1910 dev_err(dev, "Failed reading nvmem cell, err: %u, bytes fetched: %zd\n",
1911 *buf, len);
1912 if (!IS_ERR(buf)) {
1913 kfree(buf);
1914 err = -EINVAL;
1915 } else {
1916 err = PTR_ERR(buf);
1917 }
1918 } else {
Asutosh Dasb8614aa2018-01-31 15:44:15 +05301919 /*
1920 * 30 bits from bit offset 0 would be read.
1921 * We're interested in bits 28:29
1922 */
1923 val = (*buf >> 28) & 0x3;
Asutosh Das1c43b132018-01-11 18:08:40 +05301924 kfree(buf);
1925 }
1926
1927 nvmem_cell_put(cell);
1928 return err ? err : (long) val;
1929}
1930
1931/* Reads the SoC version */
1932static int sdhci_msm_get_socrev(struct device *dev,
1933 struct sdhci_msm_host *msm_host)
1934{
1935
1936 msm_host->soc_min_rev = qfprom_read(dev, "minor_rev");
1937
1938 if (msm_host->soc_min_rev < 0)
1939 dev_err(dev, "failed getting soc_min_rev, err : %d\n",
1940 msm_host->soc_min_rev);
1941 return msm_host->soc_min_rev;
1942}
1943#else
1944/* Reads the SoC version */
1945static int sdhci_msm_get_socrev(struct device *dev,
1946 struct sdhci_msm_host *msm_host)
1947{
1948 return 0;
1949}
1950#endif
1951
Asutosh Das0ef24812012-12-18 16:14:02 +05301952/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001953static
1954struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1955 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301956{
1957 struct sdhci_msm_pltfm_data *pdata = NULL;
1958 struct device_node *np = dev->of_node;
1959 u32 bus_width = 0;
1960 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301961 int clk_table_len;
1962 u32 *clk_table = NULL;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301963 int ice_clk_table_len;
1964 u32 *ice_clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301965 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301966 const char *lower_bus_speed = NULL;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05301967 int bus_clk_table_len;
1968 u32 *bus_clk_table = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301969
1970 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1971 if (!pdata) {
1972 dev_err(dev, "failed to allocate memory for platform data\n");
1973 goto out;
1974 }
1975
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301976 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
Bao D. Nguyen0f5ac952017-06-14 12:42:41 -07001977 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301978 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301979
Asutosh Das0ef24812012-12-18 16:14:02 +05301980 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1981 if (bus_width == 8)
1982 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1983 else if (bus_width == 4)
1984 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1985 else {
1986 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1987 pdata->mmc_bus_width = 0;
1988 }
1989
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001990 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301991 &msm_host->mmc->clk_scaling.pltfm_freq_table,
1992 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001993 pr_debug("%s: no clock scaling frequencies were supplied\n",
1994 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05301995 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
1996 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
1997 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001998
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301999 /*
2000 * Few hosts can support DDR52 mode at the same lower
2001 * system voltage corner as high-speed mode. In such cases,
2002 * it is always better to put it in DDR mode which will
2003 * improve the performance without any power impact.
2004 */
2005 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
2006 &lower_bus_speed)) {
2007 if (!strcmp(lower_bus_speed, "DDR52"))
2008 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
2009 MMC_SCALING_LOWER_DDR52_MODE;
2010 }
2011
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302012 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
2013 &clk_table, &clk_table_len, 0)) {
2014 dev_err(dev, "failed parsing supported clock rates\n");
2015 goto out;
2016 }
2017 if (!clk_table || !clk_table_len) {
2018 dev_err(dev, "Invalid clock table\n");
2019 goto out;
2020 }
2021 pdata->sup_clk_table = clk_table;
2022 pdata->sup_clk_cnt = clk_table_len;
2023
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05302024 if (!sdhci_msm_dt_get_array(dev, "qcom,bus-aggr-clk-rates",
2025 &bus_clk_table, &bus_clk_table_len, 0)) {
2026 if (bus_clk_table && bus_clk_table_len) {
2027 pdata->bus_clk_table = bus_clk_table;
2028 pdata->bus_clk_cnt = bus_clk_table_len;
2029 }
2030 }
2031
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302032 if (msm_host->ice.pdev) {
2033 if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
2034 &ice_clk_table, &ice_clk_table_len, 0)) {
2035 dev_err(dev, "failed parsing supported ice clock rates\n");
2036 goto out;
2037 }
2038 if (!ice_clk_table || !ice_clk_table_len) {
2039 dev_err(dev, "Invalid clock table\n");
2040 goto out;
2041 }
Sahitya Tummala073ca552015-08-06 13:59:37 +05302042 if (ice_clk_table_len != 2) {
2043 dev_err(dev, "Need max and min frequencies in the table\n");
2044 goto out;
2045 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302046 pdata->sup_ice_clk_table = ice_clk_table;
2047 pdata->sup_ice_clk_cnt = ice_clk_table_len;
Sahitya Tummala073ca552015-08-06 13:59:37 +05302048 pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
2049 pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
2050 dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
2051 pdata->ice_clk_max, pdata->ice_clk_min);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302052 }
2053
Asutosh Das0ef24812012-12-18 16:14:02 +05302054 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
2055 sdhci_msm_slot_reg_data),
2056 GFP_KERNEL);
2057 if (!pdata->vreg_data) {
2058 dev_err(dev, "failed to allocate memory for vreg data\n");
2059 goto out;
2060 }
2061
2062 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
2063 "vdd")) {
2064 dev_err(dev, "failed parsing vdd data\n");
2065 goto out;
2066 }
2067 if (sdhci_msm_dt_parse_vreg_info(dev,
2068 &pdata->vreg_data->vdd_io_data,
2069 "vdd-io")) {
2070 dev_err(dev, "failed parsing vdd-io data\n");
2071 goto out;
2072 }
2073
2074 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
2075 dev_err(dev, "failed parsing gpio data\n");
2076 goto out;
2077 }
2078
Asutosh Das0ef24812012-12-18 16:14:02 +05302079 len = of_property_count_strings(np, "qcom,bus-speed-mode");
2080
2081 for (i = 0; i < len; i++) {
2082 const char *name = NULL;
2083
2084 of_property_read_string_index(np,
2085 "qcom,bus-speed-mode", i, &name);
2086 if (!name)
2087 continue;
2088
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002089 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
2090 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
2091 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
2092 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
2093 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05302094 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2095 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
2096 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2097 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
2098 pdata->caps |= MMC_CAP_1_8V_DDR
2099 | MMC_CAP_UHS_DDR50;
2100 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
2101 pdata->caps |= MMC_CAP_1_2V_DDR
2102 | MMC_CAP_UHS_DDR50;
2103 }
2104
2105 if (of_get_property(np, "qcom,nonremovable", NULL))
2106 pdata->nonremovable = true;
2107
Guoping Yuf7c91332014-08-20 16:56:18 +08002108 if (of_get_property(np, "qcom,nonhotplug", NULL))
2109 pdata->nonhotplug = true;
2110
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08002111 pdata->largeaddressbus =
2112 of_property_read_bool(np, "qcom,large-address-bus");
2113
Dov Levenglickc9033ab2015-03-10 16:00:56 +02002114 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
2115 msm_host->mmc->wakeup_on_idle = true;
2116
Gilad Bronerc788a672015-09-08 15:39:11 +03002117 sdhci_msm_pm_qos_parse(dev, pdata);
2118
Pavan Anamula5a256df2015-10-16 14:38:28 +05302119 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302120 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05302121
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002122 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002123 msm_host->regs_restore.is_supported =
2124 of_property_read_bool(np, "qcom,restore-after-cx-collapse");
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002125
Vijay Viswanatha5492612017-10-17 15:38:55 +05302126 if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
2127 pdata->rclk_wa = true;
2128
Asutosh Das1c43b132018-01-11 18:08:40 +05302129 /*
2130 * rclk_wa is not required if soc version is mentioned and
2131 * is not base version.
2132 */
2133 if (msm_host->soc_min_rev != 0)
2134 pdata->rclk_wa = false;
2135
Asutosh Das0ef24812012-12-18 16:14:02 +05302136 return pdata;
2137out:
2138 return NULL;
2139}
2140
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302141/* Returns required bandwidth in Bytes per Sec */
2142static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
2143 struct mmc_ios *ios)
2144{
Sahitya Tummala2886c922013-04-03 18:03:31 +05302145 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2146 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2147
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302148 unsigned int bw;
2149
Sahitya Tummala2886c922013-04-03 18:03:31 +05302150 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302151 /*
2152 * For DDR mode, SDCC controller clock will be at
2153 * the double rate than the actual clock that goes to card.
2154 */
2155 if (ios->bus_width == MMC_BUS_WIDTH_4)
2156 bw /= 2;
2157 else if (ios->bus_width == MMC_BUS_WIDTH_1)
2158 bw /= 8;
2159
2160 return bw;
2161}
2162
2163static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
2164 unsigned int bw)
2165{
2166 unsigned int *table = host->pdata->voting_data->bw_vecs;
2167 unsigned int size = host->pdata->voting_data->bw_vecs_size;
2168 int i;
2169
2170 if (host->msm_bus_vote.is_max_bw_needed && bw)
2171 return host->msm_bus_vote.max_bw_vote;
2172
2173 for (i = 0; i < size; i++) {
2174 if (bw <= table[i])
2175 break;
2176 }
2177
2178 if (i && (i == size))
2179 i--;
2180
2181 return i;
2182}
2183
2184/*
2185 * This function must be called with host lock acquired.
2186 * Caller of this function should also ensure that msm bus client
2187 * handle is not null.
2188 */
2189static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
2190 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302191 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302192{
2193 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2194 int rc = 0;
2195
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302196 BUG_ON(!flags);
2197
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302198 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302199 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302200 rc = msm_bus_scale_client_update_request(
2201 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302202 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302203 if (rc) {
2204 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2205 mmc_hostname(host->mmc),
2206 msm_host->msm_bus_vote.client_handle, vote, rc);
2207 goto out;
2208 }
2209 msm_host->msm_bus_vote.curr_vote = vote;
2210 }
2211out:
2212 return rc;
2213}
2214
2215/*
2216 * Internal work. Work to set 0 bandwidth for msm bus.
2217 */
2218static void sdhci_msm_bus_work(struct work_struct *work)
2219{
2220 struct sdhci_msm_host *msm_host;
2221 struct sdhci_host *host;
2222 unsigned long flags;
2223
2224 msm_host = container_of(work, struct sdhci_msm_host,
2225 msm_bus_vote.vote_work.work);
2226 host = platform_get_drvdata(msm_host->pdev);
2227
2228 if (!msm_host->msm_bus_vote.client_handle)
2229 return;
2230
2231 spin_lock_irqsave(&host->lock, flags);
2232 /* don't vote for 0 bandwidth if any request is in progress */
2233 if (!host->mrq) {
2234 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302235 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302236 } else
2237 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
2238 mmc_hostname(host->mmc), __func__);
2239 spin_unlock_irqrestore(&host->lock, flags);
2240}
2241
2242/*
2243 * This function cancels any scheduled delayed work and sets the bus
2244 * vote based on bw (bandwidth) argument.
2245 */
2246static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2247 unsigned int bw)
2248{
2249 int vote;
2250 unsigned long flags;
2251 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2252 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2253
2254 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2255 spin_lock_irqsave(&host->lock, flags);
2256 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302257 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302258 spin_unlock_irqrestore(&host->lock, flags);
2259}
2260
#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */

/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	/* only defer a 0-bandwidth vote if we are not already at minimum */
	if (msm_host->msm_bus_vote.min_bw_vote !=
		msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_wq,
				   &msm_host->msm_bus_vote.vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}
2278
/*
 * sdhci_msm_bus_register - set up msm-bus scaling from the device tree
 * @host: msm host to register for
 * @pdev: platform device used for DT lookup and logging
 *
 * Reads the msm-bus platform data and the "qcom,bus-bw-vectors-bps"
 * table. Bus voting is optional: when either piece is absent the
 * client handle stays 0 and all voting paths become no-ops. Returns 0
 * on success (including the "not configured" case) or a negative errno.
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	/* register only when every required piece of DT data is present */
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* bus voting not configured; drop the scratch allocation */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
2333
2334static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2335{
2336 if (host->msm_bus_vote.client_handle)
2337 msm_bus_scale_unregister_client(
2338 host->msm_bus_vote.client_handle);
2339}
2340
/*
 * sdhci_msm_bus_voting - vote, or schedule dropping the vote, for bus bw
 * @host: SDHCI host
 * @enable: non-zero to vote for the bandwidth the current ios needs;
 *	zero to drop the vote (deferred via delayed work unless clock
 *	gating is active)
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	/* bus scaling was not configured for this host */
	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
2369
Asutosh Das0ef24812012-12-18 16:14:02 +05302370/* Regulator utility functions */
2371static int sdhci_msm_vreg_init_reg(struct device *dev,
2372 struct sdhci_msm_reg_data *vreg)
2373{
2374 int ret = 0;
2375
2376 /* check if regulator is already initialized? */
2377 if (vreg->reg)
2378 goto out;
2379
2380 /* Get the regulator handle */
2381 vreg->reg = devm_regulator_get(dev, vreg->name);
2382 if (IS_ERR(vreg->reg)) {
2383 ret = PTR_ERR(vreg->reg);
2384 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2385 __func__, vreg->name, ret);
2386 goto out;
2387 }
2388
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302389 if (regulator_count_voltages(vreg->reg) > 0) {
2390 vreg->set_voltage_sup = true;
2391 /* sanity check */
2392 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2393 pr_err("%s: %s invalid constraints specified\n",
2394 __func__, vreg->name);
2395 ret = -EINVAL;
2396 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302397 }
2398
2399out:
2400 return ret;
2401}
2402
2403static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2404{
2405 if (vreg->reg)
2406 devm_regulator_put(vreg->reg);
2407}
2408
2409static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2410 *vreg, int uA_load)
2411{
2412 int ret = 0;
2413
2414 /*
2415 * regulators that do not support regulator_set_voltage also
2416 * do not support regulator_set_optimum_mode
2417 */
2418 if (vreg->set_voltage_sup) {
2419 ret = regulator_set_load(vreg->reg, uA_load);
2420 if (ret < 0)
2421 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2422 __func__, vreg->name, uA_load, ret);
2423 else
2424 /*
2425 * regulator_set_load() can return non zero
2426 * value even for success case.
2427 */
2428 ret = 0;
2429 }
2430 return ret;
2431}
2432
2433static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2434 int min_uV, int max_uV)
2435{
2436 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302437 if (vreg->set_voltage_sup) {
2438 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2439 if (ret) {
2440 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302441 __func__, vreg->name, min_uV, max_uV, ret);
2442 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302443 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302444
2445 return ret;
2446}
2447
2448static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2449{
2450 int ret = 0;
2451
2452 /* Put regulator in HPM (high power mode) */
2453 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2454 if (ret < 0)
2455 return ret;
2456
2457 if (!vreg->is_enabled) {
2458 /* Set voltage level */
2459 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2460 vreg->high_vol_level);
2461 if (ret)
2462 return ret;
2463 }
2464 ret = regulator_enable(vreg->reg);
2465 if (ret) {
2466 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2467 __func__, vreg->name, ret);
2468 return ret;
2469 }
2470 vreg->is_enabled = true;
2471 return ret;
2472}
2473
2474static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2475{
2476 int ret = 0;
2477
2478 /* Never disable regulator marked as always_on */
2479 if (vreg->is_enabled && !vreg->is_always_on) {
2480 ret = regulator_disable(vreg->reg);
2481 if (ret) {
2482 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2483 __func__, vreg->name, ret);
2484 goto out;
2485 }
2486 vreg->is_enabled = false;
2487
2488 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2489 if (ret < 0)
2490 goto out;
2491
2492 /* Set min. voltage level to 0 */
2493 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2494 if (ret)
2495 goto out;
2496 } else if (vreg->is_enabled && vreg->is_always_on) {
2497 if (vreg->lpm_sup) {
2498 /* Put always_on regulator in LPM (low power mode) */
2499 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2500 vreg->lpm_uA);
2501 if (ret < 0)
2502 goto out;
2503 }
2504 }
2505out:
2506 return ret;
2507}
2508
2509static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2510 bool enable, bool is_init)
2511{
2512 int ret = 0, i;
2513 struct sdhci_msm_slot_reg_data *curr_slot;
2514 struct sdhci_msm_reg_data *vreg_table[2];
2515
2516 curr_slot = pdata->vreg_data;
2517 if (!curr_slot) {
2518 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2519 __func__);
2520 goto out;
2521 }
2522
2523 vreg_table[0] = curr_slot->vdd_data;
2524 vreg_table[1] = curr_slot->vdd_io_data;
2525
2526 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2527 if (vreg_table[i]) {
2528 if (enable)
2529 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2530 else
2531 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2532 if (ret)
2533 goto out;
2534 }
2535 }
2536out:
2537 return ret;
2538}
2539
Asutosh Das0ef24812012-12-18 16:14:02 +05302540/* This init function should be called only once for each SDHC slot */
2541static int sdhci_msm_vreg_init(struct device *dev,
2542 struct sdhci_msm_pltfm_data *pdata,
2543 bool is_init)
2544{
2545 int ret = 0;
2546 struct sdhci_msm_slot_reg_data *curr_slot;
2547 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2548
2549 curr_slot = pdata->vreg_data;
2550 if (!curr_slot)
2551 goto out;
2552
2553 curr_vdd_reg = curr_slot->vdd_data;
2554 curr_vdd_io_reg = curr_slot->vdd_io_data;
2555
2556 if (!is_init)
2557 /* Deregister all regulators from regulator framework */
2558 goto vdd_io_reg_deinit;
2559
2560 /*
2561 * Get the regulator handle from voltage regulator framework
2562 * and then try to set the voltage level for the regulator
2563 */
2564 if (curr_vdd_reg) {
2565 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2566 if (ret)
2567 goto out;
2568 }
2569 if (curr_vdd_io_reg) {
2570 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2571 if (ret)
2572 goto vdd_reg_deinit;
2573 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302574
Asutosh Das0ef24812012-12-18 16:14:02 +05302575 if (ret)
2576 dev_err(dev, "vreg reset failed (%d)\n", ret);
2577 goto out;
2578
2579vdd_io_reg_deinit:
2580 if (curr_vdd_io_reg)
2581 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2582vdd_reg_deinit:
2583 if (curr_vdd_reg)
2584 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2585out:
2586 return ret;
2587}
2588
2589
2590static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2591 enum vdd_io_level level,
2592 unsigned int voltage_level)
2593{
2594 int ret = 0;
2595 int set_level;
2596 struct sdhci_msm_reg_data *vdd_io_reg;
2597
2598 if (!pdata->vreg_data)
2599 return ret;
2600
2601 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2602 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2603 switch (level) {
2604 case VDD_IO_LOW:
2605 set_level = vdd_io_reg->low_vol_level;
2606 break;
2607 case VDD_IO_HIGH:
2608 set_level = vdd_io_reg->high_vol_level;
2609 break;
2610 case VDD_IO_SET_LEVEL:
2611 set_level = voltage_level;
2612 break;
2613 default:
2614 pr_err("%s: invalid argument level = %d",
2615 __func__, level);
2616 ret = -EINVAL;
2617 return ret;
2618 }
2619 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2620 set_level);
2621 }
2622 return ret;
2623}
2624
Ritesh Harjani42876f42015-11-17 17:46:51 +05302625/*
2626 * Acquire spin-lock host->lock before calling this function
2627 */
2628static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2629 bool enable)
2630{
2631 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2632 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2633
2634 if (enable && !msm_host->is_sdiowakeup_enabled)
2635 enable_irq(msm_host->pdata->sdiowakeup_irq);
2636 else if (!enable && msm_host->is_sdiowakeup_enabled)
2637 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2638 else
2639 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2640 __func__, enable, msm_host->is_sdiowakeup_enabled);
2641 msm_host->is_sdiowakeup_enabled = enable;
2642}
2643
/*
 * IRQ handler for the SDIO wakeup interrupt: disarms the wakeup IRQ
 * (under host->lock, as required by sdhci_msm_cfg_sdiowakeup_gpio_irq)
 * and marks that SDIO processing is pending.
 */
static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned long flags;

	pr_debug("%s: irq (%d) received\n", __func__, irq);

	/* Disarm to avoid a wakeup-IRQ storm until it is re-armed later */
	spin_lock_irqsave(&host->lock, flags);
	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->sdio_pending_processing = true;

	return IRQ_HANDLED;
}
2661
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302662void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2663{
2664 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2665 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302666 const struct sdhci_msm_offset *msm_host_offset =
2667 msm_host->offset;
Siba Prasad0196fe42017-06-27 15:13:27 +05302668 unsigned int irq_flags = 0;
2669 struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302670
Siba Prasad0196fe42017-06-27 15:13:27 +05302671 if (pwr_irq_desc)
2672 irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
2673 state_use_accessors);
2674
2675 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302676 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302677 sdhci_msm_readl_relaxed(host,
2678 msm_host_offset->CORE_PWRCTL_STATUS),
2679 sdhci_msm_readl_relaxed(host,
2680 msm_host_offset->CORE_PWRCTL_MASK),
2681 sdhci_msm_readl_relaxed(host,
Siba Prasad0196fe42017-06-27 15:13:27 +05302682 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
2683
2684 MMC_TRACE(host->mmc,
2685 "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
2686 __func__,
2687 sdhci_msm_readb_relaxed(host,
2688 msm_host_offset->CORE_PWRCTL_STATUS),
2689 sdhci_msm_readb_relaxed(host,
2690 msm_host_offset->CORE_PWRCTL_MASK),
2691 sdhci_msm_readb_relaxed(host,
2692 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302693}
2694
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002695static int sdhci_msm_clear_pwrctl_status(struct sdhci_host *host, u8 value)
2696{
2697 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2698 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2699 const struct sdhci_msm_offset *msm_host_offset = msm_host->offset;
2700 int ret = 0, retry = 10;
2701
2702 /*
2703 * There is a rare HW scenario where the first clear pulse could be
2704 * lost when actual reset and clear/read of status register is
2705 * happening at a time. Hence, retry for at least 10 times to make
2706 * sure status register is cleared. Otherwise, this will result in
2707 * a spurious power IRQ resulting in system instability.
2708 */
2709 do {
2710 if (retry == 0) {
2711 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2712 mmc_hostname(host->mmc), value);
2713 sdhci_msm_dump_pwr_ctrl_regs(host);
2714 WARN_ON(1);
2715 ret = -EBUSY;
2716 break;
2717 }
2718
2719 /*
2720 * Clear the PWRCTL_STATUS interrupt bits by writing to the
2721 * corresponding bits in the PWRCTL_CLEAR register.
2722 */
2723 sdhci_msm_writeb_relaxed(value, host,
2724 msm_host_offset->CORE_PWRCTL_CLEAR);
2725 /*
2726 * SDHC has core_mem and hc_mem device memory and these memory
2727 * addresses do not fall within 1KB region. Hence, any update
2728 * to core_mem address space would require an mb() to ensure
2729 * this gets completed before its next update to registers
2730 * within hc_mem.
2731 */
2732 mb();
2733 retry--;
2734 udelay(10);
2735 } while (value & sdhci_msm_readb_relaxed(host,
2736 msm_host_offset->CORE_PWRCTL_STATUS));
2737
2738 return ret;
2739}
2740
Asutosh Das0ef24812012-12-18 16:14:02 +05302741static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2742{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002743 struct sdhci_host *host = (struct sdhci_host *)data;
2744 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2745 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302746 const struct sdhci_msm_offset *msm_host_offset =
2747 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302748 u8 irq_status = 0;
2749 u8 irq_ack = 0;
2750 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302751 int pwr_state = 0, io_level = 0;
2752 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05302753
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302754 irq_status = sdhci_msm_readb_relaxed(host,
2755 msm_host_offset->CORE_PWRCTL_STATUS);
2756
Asutosh Das0ef24812012-12-18 16:14:02 +05302757 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2758 mmc_hostname(msm_host->mmc), irq, irq_status);
2759
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002760 sdhci_msm_clear_pwrctl_status(host, irq_status);
Asutosh Das0ef24812012-12-18 16:14:02 +05302761
2762 /* Handle BUS ON/OFF*/
2763 if (irq_status & CORE_PWRCTL_BUS_ON) {
2764 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302765 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302766 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302767 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2768 VDD_IO_HIGH, 0);
2769 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302770 if (ret)
2771 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2772 else
2773 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302774
2775 pwr_state = REQ_BUS_ON;
2776 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302777 }
2778 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302779 if (msm_host->pltfm_init_done)
2780 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2781 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302782 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302783 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302784 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2785 VDD_IO_LOW, 0);
2786 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302787 if (ret)
2788 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2789 else
2790 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302791
2792 pwr_state = REQ_BUS_OFF;
2793 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302794 }
2795 /* Handle IO LOW/HIGH */
2796 if (irq_status & CORE_PWRCTL_IO_LOW) {
2797 /* Switch voltage Low */
2798 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2799 if (ret)
2800 irq_ack |= CORE_PWRCTL_IO_FAIL;
2801 else
2802 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302803
2804 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302805 }
2806 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2807 /* Switch voltage High */
2808 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2809 if (ret)
2810 irq_ack |= CORE_PWRCTL_IO_FAIL;
2811 else
2812 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302813
2814 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302815 }
2816
2817 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302818 sdhci_msm_writeb_relaxed(irq_ack, host,
2819 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302820 /*
2821 * SDHC has core_mem and hc_mem device memory and these memory
2822 * addresses do not fall within 1KB region. Hence, any update to
2823 * core_mem address space would require an mb() to ensure this gets
2824 * completed before its next update to registers within hc_mem.
2825 */
2826 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302827 if ((io_level & REQ_IO_HIGH) &&
2828 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2829 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302830 writel_relaxed((readl_relaxed(host->ioaddr +
2831 msm_host_offset->CORE_VENDOR_SPEC) &
2832 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2833 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002834 else if ((io_level & REQ_IO_LOW) ||
2835 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302836 writel_relaxed((readl_relaxed(host->ioaddr +
2837 msm_host_offset->CORE_VENDOR_SPEC) |
2838 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2839 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002840 mb();
2841
Asutosh Das0ef24812012-12-18 16:14:02 +05302842 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2843 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302844 spin_lock_irqsave(&host->lock, flags);
2845 if (pwr_state)
2846 msm_host->curr_pwr_state = pwr_state;
2847 if (io_level)
2848 msm_host->curr_io_level = io_level;
2849 complete(&msm_host->pwr_irq_completion);
2850 spin_unlock_irqrestore(&host->lock, flags);
2851
Asutosh Das0ef24812012-12-18 16:14:02 +05302852 return IRQ_HANDLED;
2853}
2854
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302855static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302856show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2857{
2858 struct sdhci_host *host = dev_get_drvdata(dev);
2859 int poll;
2860 unsigned long flags;
2861
2862 spin_lock_irqsave(&host->lock, flags);
2863 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2864 spin_unlock_irqrestore(&host->lock, flags);
2865
2866 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2867}
2868
2869static ssize_t
2870store_polling(struct device *dev, struct device_attribute *attr,
2871 const char *buf, size_t count)
2872{
2873 struct sdhci_host *host = dev_get_drvdata(dev);
2874 int value;
2875 unsigned long flags;
2876
2877 if (!kstrtou32(buf, 0, &value)) {
2878 spin_lock_irqsave(&host->lock, flags);
2879 if (value) {
2880 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2881 mmc_detect_change(host->mmc, 0);
2882 } else {
2883 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2884 }
2885 spin_unlock_irqrestore(&host->lock, flags);
2886 }
2887 return count;
2888}
2889
2890static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302891show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2892 char *buf)
2893{
2894 struct sdhci_host *host = dev_get_drvdata(dev);
2895 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2896 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2897
2898 return snprintf(buf, PAGE_SIZE, "%u\n",
2899 msm_host->msm_bus_vote.is_max_bw_needed);
2900}
2901
2902static ssize_t
2903store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2904 const char *buf, size_t count)
2905{
2906 struct sdhci_host *host = dev_get_drvdata(dev);
2907 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2908 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2909 uint32_t value;
2910 unsigned long flags;
2911
2912 if (!kstrtou32(buf, 0, &value)) {
2913 spin_lock_irqsave(&host->lock, flags);
2914 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2915 spin_unlock_irqrestore(&host->lock, flags);
2916 }
2917 return count;
2918}
2919
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302920static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302921{
2922 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2923 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302924 const struct sdhci_msm_offset *msm_host_offset =
2925 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302926 unsigned long flags;
2927 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302928 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302929
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302930 spin_lock_irqsave(&host->lock, flags);
2931 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2932 mmc_hostname(host->mmc), __func__, req_type,
2933 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302934 if (!msm_host->mci_removed)
2935 io_sig_sts = sdhci_msm_readl_relaxed(host,
2936 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302937
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302938 /*
2939 * The IRQ for request type IO High/Low will be generated when -
2940 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2941 * 2. If 1 is true and when there is a state change in 1.8V enable
2942 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2943 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2944 * layer tries to set it to 3.3V before card detection happens, the
2945 * IRQ doesn't get triggered as there is no state change in this bit.
2946 * The driver already handles this case by changing the IO voltage
2947 * level to high as part of controller power up sequence. Hence, check
2948 * for host->pwr to handle a case where IO voltage high request is
2949 * issued even before controller power up.
2950 */
2951 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2952 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2953 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2954 pr_debug("%s: do not wait for power IRQ that never comes\n",
2955 mmc_hostname(host->mmc));
2956 spin_unlock_irqrestore(&host->lock, flags);
2957 return;
2958 }
2959 }
2960
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302961 if ((req_type & msm_host->curr_pwr_state) ||
2962 (req_type & msm_host->curr_io_level))
2963 done = true;
2964 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302965
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302966 /*
2967 * This is needed here to hanlde a case where IRQ gets
2968 * triggered even before this function is called so that
2969 * x->done counter of completion gets reset. Otherwise,
2970 * next call to wait_for_completion returns immediately
2971 * without actually waiting for the IRQ to be handled.
2972 */
2973 if (done)
2974 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302975 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
Siba Prasad0196fe42017-06-27 15:13:27 +05302976 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
Ritesh Harjani82124772014-11-04 15:34:00 +05302977 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2978 mmc_hostname(host->mmc), req_type);
Siba Prasad0196fe42017-06-27 15:13:27 +05302979 MMC_TRACE(host->mmc,
2980 "%s: request(%d) timed out waiting for pwr_irq\n",
2981 __func__, req_type);
2982 sdhci_msm_dump_pwr_ctrl_regs(host);
2983 }
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302984 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2985 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302986}
2987
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002988static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2989{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302990 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2991 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2992 const struct sdhci_msm_offset *msm_host_offset =
2993 msm_host->offset;
2994 u32 config = readl_relaxed(host->ioaddr +
2995 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302996
2997 if (enable) {
2998 config |= CORE_CDR_EN;
2999 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303000 writel_relaxed(config, host->ioaddr +
3001 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303002 } else {
3003 config &= ~CORE_CDR_EN;
3004 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303005 writel_relaxed(config, host->ioaddr +
3006 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303007 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003008}
3009
Asutosh Das648f9d12013-01-10 21:11:04 +05303010static unsigned int sdhci_msm_max_segs(void)
3011{
3012 return SDHCI_MSM_MAX_SEGMENTS;
3013}
3014
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303015static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303016{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303017 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3018 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303019
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303020 return msm_host->pdata->sup_clk_table[0];
3021}
3022
3023static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
3024{
3025 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3026 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3027 int max_clk_index = msm_host->pdata->sup_clk_cnt;
3028
3029 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
3030}
3031
3032static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
3033 u32 req_clk)
3034{
3035 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3036 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3037 unsigned int sel_clk = -1;
3038 unsigned char cnt;
3039
3040 if (req_clk < sdhci_msm_get_min_clock(host)) {
3041 sel_clk = sdhci_msm_get_min_clock(host);
3042 return sel_clk;
3043 }
3044
3045 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
3046 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
3047 break;
3048 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
3049 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3050 break;
3051 } else {
3052 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3053 }
3054 }
3055 return sel_clk;
3056}
3057
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303058static long sdhci_msm_get_bus_aggr_clk_rate(struct sdhci_host *host,
3059 u32 apps_clk)
3060{
3061 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3062 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3063 long sel_clk = -1;
3064 unsigned char cnt;
3065
3066 if (msm_host->pdata->bus_clk_cnt != msm_host->pdata->sup_clk_cnt) {
3067 pr_err("%s: %s: mismatch between bus_clk_cnt(%u) and apps_clk_cnt(%u)\n",
3068 mmc_hostname(host->mmc), __func__,
3069 (unsigned int)msm_host->pdata->bus_clk_cnt,
3070 (unsigned int)msm_host->pdata->sup_clk_cnt);
3071 return msm_host->pdata->bus_clk_table[0];
3072 }
3073 if (apps_clk == sdhci_msm_get_min_clock(host)) {
3074 sel_clk = msm_host->pdata->bus_clk_table[0];
3075 return sel_clk;
3076 }
3077
3078 for (cnt = 0; cnt < msm_host->pdata->bus_clk_cnt; cnt++) {
3079 if (msm_host->pdata->sup_clk_table[cnt] > apps_clk)
3080 break;
3081 sel_clk = msm_host->pdata->bus_clk_table[cnt];
3082 }
3083 return sel_clk;
3084}
3085
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003086static void sdhci_msm_registers_save(struct sdhci_host *host)
3087{
3088 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3089 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3090 const struct sdhci_msm_offset *msm_host_offset =
3091 msm_host->offset;
3092
3093 if (!msm_host->regs_restore.is_supported)
3094 return;
3095
3096 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
3097 msm_host_offset->CORE_VENDOR_SPEC);
3098 msm_host->regs_restore.vendor_pwrctl_mask =
3099 readl_relaxed(host->ioaddr +
3100 msm_host_offset->CORE_PWRCTL_MASK);
3101 msm_host->regs_restore.vendor_func2 =
3102 readl_relaxed(host->ioaddr +
3103 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3104 msm_host->regs_restore.vendor_func3 =
3105 readl_relaxed(host->ioaddr +
3106 msm_host_offset->CORE_VENDOR_SPEC3);
3107 msm_host->regs_restore.hc_2c_2e =
3108 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
3109 msm_host->regs_restore.hc_3c_3e =
3110 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
3111 msm_host->regs_restore.vendor_pwrctl_ctl =
3112 readl_relaxed(host->ioaddr +
3113 msm_host_offset->CORE_PWRCTL_CTL);
3114 msm_host->regs_restore.hc_38_3a =
3115 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
3116 msm_host->regs_restore.hc_34_36 =
3117 sdhci_readl(host, SDHCI_INT_ENABLE);
3118 msm_host->regs_restore.hc_28_2a =
3119 sdhci_readl(host, SDHCI_HOST_CONTROL);
3120 msm_host->regs_restore.vendor_caps_0 =
3121 readl_relaxed(host->ioaddr +
3122 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3123 msm_host->regs_restore.hc_caps_1 =
3124 sdhci_readl(host, SDHCI_CAPABILITIES_1);
3125 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
3126 msm_host_offset->CORE_TESTBUS_CONFIG);
3127 msm_host->regs_restore.is_valid = true;
3128
3129 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
3130 mmc_hostname(host->mmc), __func__,
3131 readl_relaxed(host->ioaddr +
3132 msm_host_offset->CORE_PWRCTL_MASK));
3133}
3134
/*
 * Write back the register snapshot taken by sdhci_msm_registers_save().
 * PWRCTL_MASK is zeroed first and restored last (together with
 * PWRCTL_CTL), with any stale PWRCTL_STATUS bits cleared in between, so
 * that no spurious power IRQ fires mid-restore. No-op unless a valid
 * snapshot exists.
 */
static void sdhci_msm_registers_restore(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 irq_status;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	if (!msm_host->regs_restore.is_supported ||
		!msm_host->regs_restore.is_valid)
		return;

	/* Mask power IRQs until the full state is back in place */
	writel_relaxed(0, host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
	writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	writel_relaxed(msm_host->regs_restore.vendor_func2,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	writel_relaxed(msm_host->regs_restore.vendor_func3,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
			SDHCI_CLOCK_CONTROL);
	sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
			SDHCI_AUTO_CMD_ERR);
	sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
			SDHCI_SIGNAL_ENABLE);
	sdhci_writel(host, msm_host->regs_restore.hc_34_36,
			SDHCI_INT_ENABLE);
	sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
			SDHCI_HOST_CONTROL);
	writel_relaxed(msm_host->regs_restore.vendor_caps_0,
			host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
			SDHCI_CAPABILITIES_1);
	writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
			msm_host_offset->CORE_TESTBUS_CONFIG);
	/* Snapshot consumed; a fresh save is required before next restore */
	msm_host->regs_restore.is_valid = false;

	/*
	 * Clear the PWRCTL_STATUS register.
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	irq_status = sdhci_msm_readb_relaxed(host,
			msm_host_offset->CORE_PWRCTL_STATUS);

	sdhci_msm_clear_pwrctl_status(host, irq_status);

	/* Re-arm power IRQ control and mask only after status is clean */
	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
			host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
			host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);

	pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
		mmc_hostname(host->mmc), __func__,
		readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_PWRCTL_MASK));
}
3198
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303199static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
3200{
3201 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3202 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3203 int rc = 0;
3204
3205 if (atomic_read(&msm_host->controller_clock))
3206 return 0;
3207
3208 sdhci_msm_bus_voting(host, 1);
3209
3210 if (!IS_ERR(msm_host->pclk)) {
3211 rc = clk_prepare_enable(msm_host->pclk);
3212 if (rc) {
3213 pr_err("%s: %s: failed to enable the pclk with error %d\n",
3214 mmc_hostname(host->mmc), __func__, rc);
3215 goto remove_vote;
3216 }
3217 }
3218
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303219 if (!IS_ERR(msm_host->bus_aggr_clk)) {
3220 rc = clk_prepare_enable(msm_host->bus_aggr_clk);
3221 if (rc) {
3222 pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
3223 mmc_hostname(host->mmc), __func__, rc);
3224 goto disable_pclk;
3225 }
3226 }
3227
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303228 rc = clk_prepare_enable(msm_host->clk);
3229 if (rc) {
3230 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
3231 mmc_hostname(host->mmc), __func__, rc);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303232 goto disable_bus_aggr_clk;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303233 }
3234
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303235 if (!IS_ERR(msm_host->ice_clk)) {
3236 rc = clk_prepare_enable(msm_host->ice_clk);
3237 if (rc) {
3238 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
3239 mmc_hostname(host->mmc), __func__, rc);
3240 goto disable_host_clk;
3241 }
3242 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303243 atomic_set(&msm_host->controller_clock, 1);
3244 pr_debug("%s: %s: enabled controller clock\n",
3245 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003246 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303247 goto out;
3248
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303249disable_host_clk:
3250 if (!IS_ERR(msm_host->clk))
3251 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303252disable_bus_aggr_clk:
3253 if (!IS_ERR(msm_host->bus_aggr_clk))
3254 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303255disable_pclk:
3256 if (!IS_ERR(msm_host->pclk))
3257 clk_disable_unprepare(msm_host->pclk);
3258remove_vote:
3259 if (msm_host->msm_bus_vote.client_handle)
3260 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3261out:
3262 return rc;
3263}
3264
/*
 * Counterpart of sdhci_msm_enable_controller_clock(): save the vendor
 * registers while the interface is still clocked, then gate the core,
 * ICE, bus aggregation and iface clocks and drop the bus bandwidth vote.
 * No-op when the controller clocks are already off.
 */
static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (atomic_read(&msm_host->controller_clock)) {
		/* Registers must be saved before the clocks are gated. */
		sdhci_msm_registers_save(host);
		if (!IS_ERR(msm_host->clk))
			clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->ice_clk))
			clk_disable_unprepare(msm_host->ice_clk);
		if (!IS_ERR(msm_host->bus_aggr_clk))
			clk_disable_unprepare(msm_host->bus_aggr_clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		sdhci_msm_bus_voting(host, 0);
		atomic_set(&msm_host->controller_clock, 0);
		pr_debug("%s: %s: disabled controller clock\n",
			mmc_hostname(host->mmc), __func__);
	}
}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303286
/*
 * Enable or disable the full set of SDHC clocks (controller clocks plus
 * the optional bus, fixed-frequency and sleep clocks) tracked by the
 * clks_on flag.
 *
 * @host:   sdhci host
 * @enable: true to turn the clocks on, false to gate them
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable().
 * On the disable path the SDCLK is stopped first; if a 1.8V signal
 * switch is in progress (card_clock_off set) the source clocks are
 * deliberately left running and the function returns early without
 * touching clks_on.
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables are complete before proceeding. */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		/* Stop the card clock (SDCLK) before gating source clocks. */
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		/* Disable in reverse order of enabling. */
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);
		sdhci_msm_disable_controller_clock(host);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
	/* Error unwind for the enable path. */
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR(msm_host->ice_clk))
		clk_disable_unprepare(msm_host->ice_clk);
	if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
		clk_disable_unprepare(msm_host->bus_aggr_clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
3380
/*
 * sdhci_msm_set_clock - sdhci_ops.set_clock hook for the MSM controller.
 *
 * @host:  sdhci host
 * @clock: requested card clock rate in Hz; 0 requests clock gating
 *
 * Responsibilities:
 *  - gate/ungate the platform clocks via sdhci_msm_prepare_clocks();
 *  - manage the CLK_PWRSAVE auto-gating bit depending on whether the
 *    card allows clock gating;
 *  - pick the supported source rate (doubled for DDR/HS400 timings,
 *    since the controller internally divides it for the card);
 *  - program the vendor-specific MCLK select and HC_SELECT_IN fields
 *    for HS400 vs. all other timings, polling the DLL lock when needed;
 *  - apply the new rate to the core clock, bus aggregation clock and
 *    pinctrl drive strength, and refresh the bus bandwidth vote.
 * Finally the standard sdhci_set_clock() is invoked to program the
 * host controller's clock control register.
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_card *card = host->mmc->card;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	long bus_clk_rate;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	curr_pwrsave = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	/* Enable auto clock gating once past initialization speeds. */
	if ((clock > 400000) &&
	    !curr_pwrsave && card && mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
		writel_relaxed(readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_HC_MCLK_SEL_MASK)
			| CORE_HC_MCLK_SEL_HS400), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(card && mmc_card_strobe(card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
				msm_host_offset->CORE_VENDOR_SPEC)
				| CORE_HC_SELECT_IN_HS400
				| CORE_HC_SELECT_IN_EN), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr +
					msm_host_offset->CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					msm_host_offset->CORE_VENDOR_SPEC3)
					& ~CORE_PWRSAVE_DLL), host->ioaddr +
					msm_host_offset->CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC);
	}
	/* Ensure the mode-select register writes complete before rate change. */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;

		/* Scale the bus aggregation clock along with the core clock. */
		if (!IS_ERR(msm_host->bus_aggr_clk) &&
				msm_host->pdata->bus_clk_cnt) {
			bus_clk_rate = sdhci_msm_get_bus_aggr_clk_rate(host,
					sup_clock);
			if (bus_clk_rate >= 0) {
				rc = clk_set_rate(msm_host->bus_aggr_clk,
						bus_clk_rate);
				if (rc) {
					pr_err("%s: %s: Failed to set rate %ld for bus-aggr-clk : %d\n",
						mmc_hostname(host->mmc),
						__func__, bus_clk_rate, rc);
					goto out;
				}
			} else {
				pr_err("%s: %s: Unsupported apps clk rate %u for bus-aggr-clk, err: %ld\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, bus_clk_rate);
			}
		}

		/* Configure pinctrl drive type according to
		 * current clock rate
		 */
		rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
		if (rc)
			pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
					mmc_hostname(host->mmc), __func__,
					clock, rc);

		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
3584
/*
 * sdhci_msm_set_uhs_signaling - sdhci_ops.set_uhs_signaling hook.
 *
 * @host: sdhci host
 * @uhs:  one of the MMC_TIMING_* modes
 *
 * Maps the MMC timing mode onto the UHS mode select field of the Host
 * Control2 register (HS200/HS400 reuse the SDR104 encoding).  For clock
 * rates at or below 100 MHz the mode select is cleared so the feedback
 * clock is used and the DLL is held in reset/power-down, which lets
 * tuning be skipped; calibration_done is cleared so the DLL is
 * recalibrated on the next switch back to high speed.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
		    (uhs == MMC_TIMING_MMC_HS200) ||
		    (uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG)
				| CORE_DLL_RST), host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG)
				| CORE_DLL_PDN), host->ioaddr +
				msm_host_offset->CORE_DLL_CONFIG);
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
3651
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003652#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003653#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303654static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003655{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303656 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303657 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3658 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303659 const struct sdhci_msm_offset *msm_host_offset =
3660 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303661 struct cmdq_host *cq_host = host->cq_host;
3662
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303663 u32 version = sdhci_msm_readl_relaxed(host,
3664 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003665 u16 minor = version & CORE_VERSION_TARGET_MASK;
3666 /* registers offset changed starting from 4.2.0 */
3667 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3668
Sayali Lokhande6e7e6d52017-01-04 12:00:35 +05303669 if (cq_host->offset_changed)
3670 offset += CQ_V5_VENDOR_CFG;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003671 pr_err("---- Debug RAM dump ----\n");
3672 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3673 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3674 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3675
3676 while (i < 16) {
3677 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3678 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3679 i++;
3680 }
3681 pr_err("-------------------------\n");
3682}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303683
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303684static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3685{
3686 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3687 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3688 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3689
3690 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3691 sizeof(struct mmc_host));
3692 if (msm_host->mmc->card)
3693 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3694 sizeof(struct mmc_card));
3695 memcpy(&cached_data->copy_host, host,
3696 sizeof(struct sdhci_host));
3697}
3698
/*
 * sdhci_msm_dump_vendor_regs - dump vendor-specific controller state for
 * error diagnosis.
 *
 * Caches the host/card structures, prints the CMDQ debug RAM (when CMDQ
 * is in use), the MCI data/FIFO counters, DLL config/status, the vendor
 * spec registers, and walks all test-bus selections capturing the debug
 * register for each.  When the inline crypto engine is active its status
 * and registers are dumped too.
 *
 * NOTE(review): callers are expected to hold the clocks on; register
 * reads here assume the interface is clocked.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};
	u32 sts = 0;

	/* Snapshot host/card state before reading the hardware. */
	sdhci_msm_cache_debug_data(host);
	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(host);

	MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_DATA_CNT),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_FIFO_CNT));
	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_DATA_CNT),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_FIFO_CNT),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_STATUS));
	pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_STATUS),
		sdhci_msm_readl_relaxed(host,
			msm_host_offset->CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val =
			(tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
				tbsel | msm_host_offset->CORE_TESTBUS_ENA;
			sdhci_msm_writel_relaxed(test_bus_val, host,
				msm_host_offset->CORE_TESTBUS_CONFIG);
			debug_reg[index++] = sdhci_msm_readl_relaxed(host,
				msm_host_offset->CORE_SDCC_DEBUG_REG);
		}
	}
	/* Print the captured test-bus values four per line. */
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
	if (host->is_crypto_en) {
		sdhci_msm_ice_get_status(host, &sts);
		pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
		sdhci_msm_ice_print_regs(host);
	}
}
3778
3779static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3780{
3781 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3782 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3783
3784 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303785 if (msm_host->ice.pdev) {
3786 if (msm_host->ice_hci_support)
3787 writel_relaxed(1, host->ioaddr +
3788 HC_VENDOR_SPECIFIC_ICE_CTRL);
3789 else
3790 writel_relaxed(1,
3791 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3792 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303793
3794 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003795}
3796
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303797/*
3798 * sdhci_msm_enhanced_strobe_mask :-
3799 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3800 * SW should write 3 to
3801 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3802 * The default reset value of this register is 2.
3803 */
3804static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3805{
3806 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3807 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303808 const struct sdhci_msm_offset *msm_host_offset =
3809 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303810
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303811 if (!msm_host->enhanced_strobe ||
3812 !mmc_card_strobe(msm_host->mmc->card)) {
3813 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303814 mmc_hostname(host->mmc));
3815 return;
3816 }
3817
3818 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303819 writel_relaxed((readl_relaxed(host->ioaddr +
3820 msm_host_offset->CORE_VENDOR_SPEC3)
3821 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3822 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303823 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303824 writel_relaxed((readl_relaxed(host->ioaddr +
3825 msm_host_offset->CORE_VENDOR_SPEC3)
3826 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3827 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303828 }
3829}
3830
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003831static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3832{
3833 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3834 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303835 const struct sdhci_msm_offset *msm_host_offset =
3836 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003837
3838 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303839 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3840 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003841 } else {
3842 u32 value;
3843
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303844 value = sdhci_msm_readl_relaxed(host,
3845 msm_host_offset->CORE_TESTBUS_CONFIG);
3846 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3847 sdhci_msm_writel_relaxed(value, host,
3848 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003849 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303850}
3851
/*
 * sdhci_msm_reset_workaround - arm/disarm the SW reset "wait idle
 * disable" workaround.
 *
 * @host:   sdhci host
 * @enable: non-zero to request a soft reset and poll for completion,
 *          zero to clear the wait-idle-disable bit again
 *
 * On enable, HC_SW_RST_REQ is set and polled (10000 iterations of 10us,
 * i.e. up to ~100ms) for the controller to clear it.  If the reset never
 * completes, HC_SW_RST_WAIT_IDLE_DIS is set so the controller resets
 * without waiting for pending AXI transfers (risky if transfers are
 * stuck on the bus), and the time is recorded in host->reset_wa_t.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	vendor_func2 = readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
		timeout = 10000;
		/* Poll until the controller clears the reset request bit. */
		while (readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
				HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
				/* Remember when the workaround was applied. */
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	}
}
3898
Gilad Broner44445992015-09-29 16:05:39 +03003899static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3900{
3901 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303902 container_of(work, struct sdhci_msm_pm_qos_irq,
3903 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003904
3905 if (atomic_read(&pm_qos_irq->counter))
3906 return;
3907
3908 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3909 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3910}
3911
3912void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3913{
3914 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3915 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3916 struct sdhci_msm_pm_qos_latency *latency =
3917 &msm_host->pdata->pm_qos_data.irq_latency;
3918 int counter;
3919
3920 if (!msm_host->pm_qos_irq.enabled)
3921 return;
3922
3923 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3924 /* Make sure to update the voting in case power policy has changed */
3925 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3926 && counter > 1)
3927 return;
3928
Asutosh Das36c2e922015-12-01 12:19:58 +05303929 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003930 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3931 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3932 msm_host->pm_qos_irq.latency);
3933}
3934
3935void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3936{
3937 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3938 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3939 int counter;
3940
3941 if (!msm_host->pm_qos_irq.enabled)
3942 return;
3943
Subhash Jadavani4d813902015-10-15 12:16:43 -07003944 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3945 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3946 } else {
3947 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3948 return;
Gilad Broner44445992015-09-29 16:05:39 +03003949 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003950
Gilad Broner44445992015-09-29 16:05:39 +03003951 if (counter)
3952 return;
3953
3954 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05303955 queue_delayed_work(msm_host->pm_qos_wq,
3956 &msm_host->pm_qos_irq.unvote_work,
3957 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03003958 return;
3959 }
3960
3961 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3962 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3963 msm_host->pm_qos_irq.latency);
3964}
3965
Gilad Broner68c54562015-09-20 11:59:46 +03003966static ssize_t
3967sdhci_msm_pm_qos_irq_show(struct device *dev,
3968 struct device_attribute *attr, char *buf)
3969{
3970 struct sdhci_host *host = dev_get_drvdata(dev);
3971 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3972 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3973 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3974
3975 return snprintf(buf, PAGE_SIZE,
3976 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3977 irq->enabled, atomic_read(&irq->counter), irq->latency);
3978}
3979
3980static ssize_t
3981sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3982 struct device_attribute *attr, char *buf)
3983{
3984 struct sdhci_host *host = dev_get_drvdata(dev);
3985 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3986 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3987
3988 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3989}
3990
3991static ssize_t
3992sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3993 struct device_attribute *attr, const char *buf, size_t count)
3994{
3995 struct sdhci_host *host = dev_get_drvdata(dev);
3996 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3997 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3998 uint32_t value;
3999 bool enable;
4000 int ret;
4001
4002 ret = kstrtou32(buf, 0, &value);
4003 if (ret)
4004 goto out;
4005 enable = !!value;
4006
4007 if (enable == msm_host->pm_qos_irq.enabled)
4008 goto out;
4009
4010 msm_host->pm_qos_irq.enabled = enable;
4011 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304012 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004013 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4014 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
4015 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4016 msm_host->pm_qos_irq.latency);
4017 }
4018
4019out:
4020 return count;
4021}
4022
#ifdef CONFIG_SMP
/*
 * On SMP, record the host controller's IRQ number in the PM QoS request
 * so the constraint follows the CPU that services that IRQ.
 */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				  struct sdhci_host *host)
{
	msm_host->pm_qos_irq.req.irq = host->irq;
}
#else
/* On UP builds there is no IRQ affinity to record. */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				  struct sdhci_host *host) { }
#endif
4033
Vijay Viswanath1971d222018-03-01 12:01:47 +05304034static bool sdhci_msm_pm_qos_wq_init(struct sdhci_msm_host *msm_host)
4035{
4036 char *wq = NULL;
4037 bool ret = true;
4038
4039 wq = kasprintf(GFP_KERNEL, "sdhci_msm_pm_qos/%s",
4040 dev_name(&msm_host->pdev->dev));
4041 if (!wq)
4042 return false;
4043 /*
4044 * Create a work queue with flag WQ_MEM_RECLAIM set for
4045 * pm_qos_unvote work. Because mmc thread is created with
4046 * flag PF_MEMALLOC set, kernel will check for work queue
4047 * flag WQ_MEM_RECLAIM when flush the work queue. If work
4048 * queue flag WQ_MEM_RECLAIM is not set, kernel warning
4049 * will be triggered.
4050 */
4051 msm_host->pm_qos_wq = create_workqueue(wq);
4052 if (!msm_host->pm_qos_wq) {
4053 ret = false;
4054 dev_err(&msm_host->pdev->dev,
4055 "failed to create pm qos unvote work queue\n");
4056 }
4057 kfree(wq);
4058 return ret;
4059}
4060
/*
 * sdhci_msm_pm_qos_irq_init - set up the PM QoS vote tied to the host IRQ.
 * @host: SDHCI host
 *
 * Registers a PM_QOS_CPU_DMA_LATENCY request (scope taken from platform
 * data: IRQ-affine, core-affine or all cores), creates the dedicated
 * unvote workqueue, and exposes the pm_qos_irq_enable / pm_qos_irq_status
 * sysfs attributes.  Called per partition; only the first call does work.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
			msm_host->pdata->pm_qos_data.irq_req_type;
	/* Core-affine / all-cores requests use a CPU mask; others bind to the IRQ. */
	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
		set_affine_irq(msm_host, host);
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	/*
	 * NOTE(review): return value ignored — if workqueue creation fails,
	 * later queue_delayed_work() calls would see a NULL pm_qos_wq.
	 * Confirm whether a fallback (e.g. system workqueue) is needed.
	 */
	sdhci_msm_pm_qos_wq_init(msm_host);

	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
4122
4123static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
4124 struct device_attribute *attr, char *buf)
4125{
4126 struct sdhci_host *host = dev_get_drvdata(dev);
4127 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4128 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4129 struct sdhci_msm_pm_qos_group *group;
4130 int i;
4131 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4132 int offset = 0;
4133
4134 for (i = 0; i < nr_groups; i++) {
4135 group = &msm_host->pm_qos[i];
4136 offset += snprintf(&buf[offset], PAGE_SIZE,
4137 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
4138 i, group->req.cpus_affine.bits[0],
4139 msm_host->pm_qos_group_enable,
4140 atomic_read(&group->counter),
4141 group->latency);
4142 }
4143
4144 return offset;
4145}
4146
4147static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
4148 struct device_attribute *attr, char *buf)
4149{
4150 struct sdhci_host *host = dev_get_drvdata(dev);
4151 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4152 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4153
4154 return snprintf(buf, PAGE_SIZE, "%s\n",
4155 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
4156}
4157
4158static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
4159 struct device_attribute *attr, const char *buf, size_t count)
4160{
4161 struct sdhci_host *host = dev_get_drvdata(dev);
4162 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4163 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4164 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4165 uint32_t value;
4166 bool enable;
4167 int ret;
4168 int i;
4169
4170 ret = kstrtou32(buf, 0, &value);
4171 if (ret)
4172 goto out;
4173 enable = !!value;
4174
4175 if (enable == msm_host->pm_qos_group_enable)
4176 goto out;
4177
4178 msm_host->pm_qos_group_enable = enable;
4179 if (!enable) {
4180 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304181 cancel_delayed_work_sync(
4182 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004183 atomic_set(&msm_host->pm_qos[i].counter, 0);
4184 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
4185 pm_qos_update_request(&msm_host->pm_qos[i].req,
4186 msm_host->pm_qos[i].latency);
4187 }
4188 }
4189
4190out:
4191 return count;
Gilad Broner44445992015-09-29 16:05:39 +03004192}
4193
4194static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
4195{
4196 int i;
4197 struct sdhci_msm_cpu_group_map *map =
4198 &msm_host->pdata->pm_qos_data.cpu_group_map;
4199
4200 if (cpu < 0)
4201 goto not_found;
4202
4203 for (i = 0; i < map->nr_groups; i++)
4204 if (cpumask_test_cpu(cpu, &map->mask[i]))
4205 return i;
4206
4207not_found:
4208 return -EINVAL;
4209}
4210
4211void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
4212 struct sdhci_msm_pm_qos_latency *latency, int cpu)
4213{
4214 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4215 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4216 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4217 struct sdhci_msm_pm_qos_group *pm_qos_group;
4218 int counter;
4219
4220 if (!msm_host->pm_qos_group_enable || group < 0)
4221 return;
4222
4223 pm_qos_group = &msm_host->pm_qos[group];
4224 counter = atomic_inc_return(&pm_qos_group->counter);
4225
4226 /* Make sure to update the voting in case power policy has changed */
4227 if (pm_qos_group->latency == latency->latency[host->power_policy]
4228 && counter > 1)
4229 return;
4230
Asutosh Das36c2e922015-12-01 12:19:58 +05304231 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03004232
4233 pm_qos_group->latency = latency->latency[host->power_policy];
4234 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
4235}
4236
4237static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
4238{
4239 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05304240 container_of(work, struct sdhci_msm_pm_qos_group,
4241 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03004242
4243 if (atomic_read(&group->counter))
4244 return;
4245
4246 group->latency = PM_QOS_DEFAULT_VALUE;
4247 pm_qos_update_request(&group->req, group->latency);
4248}
4249
Gilad Broner07d92eb2015-09-29 16:57:21 +03004250bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03004251{
4252 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4253 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4254 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4255
4256 if (!msm_host->pm_qos_group_enable || group < 0 ||
4257 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03004258 return false;
Gilad Broner44445992015-09-29 16:05:39 +03004259
4260 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05304261 queue_delayed_work(msm_host->pm_qos_wq,
4262 &msm_host->pm_qos[group].unvote_work,
4263 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03004264 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004265 }
4266
4267 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
4268 pm_qos_update_request(&msm_host->pm_qos[group].req,
4269 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03004270 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004271}
4272
4273void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
4274 struct sdhci_msm_pm_qos_latency *latency)
4275{
4276 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4277 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4278 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4279 struct sdhci_msm_pm_qos_group *group;
4280 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03004281 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004282
4283 if (msm_host->pm_qos_group_enable)
4284 return;
4285
4286 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
4287 GFP_KERNEL);
4288 if (!msm_host->pm_qos)
4289 return;
4290
4291 for (i = 0; i < nr_groups; i++) {
4292 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05304293 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004294 sdhci_msm_pm_qos_cpu_unvote_work);
4295 atomic_set(&group->counter, 0);
4296 group->req.type = PM_QOS_REQ_AFFINE_CORES;
4297 cpumask_copy(&group->req.cpus_affine,
4298 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
Ritesh Harjanib41e0572017-03-28 13:19:26 +05304299 /* We set default latency here for all pm_qos cpu groups. */
4300 group->latency = PM_QOS_DEFAULT_VALUE;
Gilad Broner44445992015-09-29 16:05:39 +03004301 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
4302 group->latency);
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304303 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
Gilad Broner44445992015-09-29 16:05:39 +03004304 __func__, i,
4305 group->req.cpus_affine.bits[0],
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304306 group->latency);
Gilad Broner44445992015-09-29 16:05:39 +03004307 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03004308 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03004309 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004310
4311 /* sysfs */
4312 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
4313 msm_host->pm_qos_group_status_attr.store = NULL;
4314 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
4315 msm_host->pm_qos_group_status_attr.attr.name =
4316 "pm_qos_cpu_groups_status";
4317 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
4318 ret = device_create_file(&msm_host->pdev->dev,
4319 &msm_host->pm_qos_group_status_attr);
4320 if (ret)
4321 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
4322 __func__, ret);
4323 msm_host->pm_qos_group_enable_attr.show =
4324 sdhci_msm_pm_qos_group_enable_show;
4325 msm_host->pm_qos_group_enable_attr.store =
4326 sdhci_msm_pm_qos_group_enable_store;
4327 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
4328 msm_host->pm_qos_group_enable_attr.attr.name =
4329 "pm_qos_cpu_groups_enable";
4330 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
4331 ret = device_create_file(&msm_host->pdev->dev,
4332 &msm_host->pm_qos_group_enable_attr);
4333 if (ret)
4334 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
4335 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03004336}
4337
Gilad Broner07d92eb2015-09-29 16:57:21 +03004338static void sdhci_msm_pre_req(struct sdhci_host *host,
4339 struct mmc_request *mmc_req)
4340{
4341 int cpu;
4342 int group;
4343 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4344 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4345 int prev_group = sdhci_msm_get_cpu_group(msm_host,
4346 msm_host->pm_qos_prev_cpu);
4347
4348 sdhci_msm_pm_qos_irq_vote(host);
4349
4350 cpu = get_cpu();
4351 put_cpu();
4352 group = sdhci_msm_get_cpu_group(msm_host, cpu);
4353 if (group < 0)
4354 return;
4355
4356 if (group != prev_group && prev_group >= 0) {
4357 sdhci_msm_pm_qos_cpu_unvote(host,
4358 msm_host->pm_qos_prev_cpu, false);
4359 prev_group = -1; /* make sure to vote for new group */
4360 }
4361
4362 if (prev_group < 0) {
4363 sdhci_msm_pm_qos_cpu_vote(host,
4364 msm_host->pdata->pm_qos_data.latency, cpu);
4365 msm_host->pm_qos_prev_cpu = cpu;
4366 }
4367}
4368
4369static void sdhci_msm_post_req(struct sdhci_host *host,
4370 struct mmc_request *mmc_req)
4371{
4372 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4373 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4374
4375 sdhci_msm_pm_qos_irq_unvote(host, false);
4376
4377 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4378 msm_host->pm_qos_prev_cpu = -1;
4379}
4380
4381static void sdhci_msm_init(struct sdhci_host *host)
4382{
4383 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4384 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4385
4386 sdhci_msm_pm_qos_irq_init(host);
4387
4388 if (msm_host->pdata->pm_qos_data.legacy_valid)
4389 sdhci_msm_pm_qos_cpu_init(host,
4390 msm_host->pdata->pm_qos_data.latency);
4391}
4392
Sahitya Tummala9150a942014-10-31 15:33:04 +05304393static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4394{
4395 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4396 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4397 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4398 u32 max_curr = 0;
4399
4400 if (curr_slot && curr_slot->vdd_data)
4401 max_curr = curr_slot->vdd_data->hpm_uA;
4402
4403 return max_curr;
4404}
4405
Sahitya Tummala073ca552015-08-06 13:59:37 +05304406static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
4407{
4408 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4409 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4410 int ret = 0;
4411 u32 clk_rate = 0;
4412
4413 if (!IS_ERR(msm_host->ice_clk)) {
4414 clk_rate = (state == MMC_LOAD_LOW) ?
4415 msm_host->pdata->ice_clk_min :
4416 msm_host->pdata->ice_clk_max;
4417 if (msm_host->ice_clk_rate == clk_rate)
4418 return 0;
4419 pr_debug("%s: changing ICE clk rate to %u\n",
4420 mmc_hostname(host->mmc), clk_rate);
4421 ret = clk_set_rate(msm_host->ice_clk, clk_rate);
4422 if (ret) {
4423 pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
4424 mmc_hostname(host->mmc), ret, clk_rate);
4425 return ret;
4426 }
4427 msm_host->ice_clk_rate = clk_rate;
4428 }
4429 return 0;
4430}
4431
/*
 * MSM-specific host operations plugged into the generic SDHCI core.
 * Hooks not set here fall back to the core's default behaviour.
 */
static struct sdhci_ops sdhci_msm_ops = {
	/* Inline Crypto Engine (ICE) integration */
	.crypto_engine_cfg = sdhci_msm_ice_cfg,
	.crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
	.crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
	.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
	.crypto_engine_reset = sdhci_msm_ice_reset,
	/* Signalling, power and tuning */
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	/* Clocking */
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	/* Debug / error handling */
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_msm_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.reset_workaround = sdhci_msm_reset_workaround,
	/* PM QoS voting around requests */
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
	.get_current_limit = sdhci_msm_get_current_limit,
	.notify_load = sdhci_msm_notify_load,
};
4461
/*
 * sdhci_set_default_hw_caps - fix up advertised capabilities and enable
 * per-core-revision quirks/workarounds.
 * @msm_host: MSM host private data
 * @host: SDHCI host
 *
 * Reads the core major/minor revision from CORE_MCI_VERSION, adjusts the
 * capability bits (voltage support, 8-bit bus, 64-bit descriptors) and
 * sets revision-dependent flags (CDCLP533 vs CM DLL, updated DLL reset,
 * 14lpp DLL, RCLK delay fix, ICE HCI support).  The final capability
 * value is written to CORE_VENDOR_SPEC_CAPABILITIES0 and cached in
 * msm_host->caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	version = sdhci_msm_readl_relaxed(host,
		msm_host_offset->CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
				(minor == 0x49) || (minor >= 0x6b)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
		writel_relaxed((readl_relaxed(host->ioaddr +
			SDHCI_CAPABILITIES) | caps), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;

	/* Cores >= 1.0x6b support ICE HCI and CDR management by the driver. */
	if ((major == 1) && (minor >= 0x6b)) {
		msm_host->ice_hci_support = true;
		host->cdr_support = true;
	}
}
4567
#ifdef CONFIG_MMC_CQ_HCI
/*
 * Attach the command-queue (CMDQ) engine to this host.  Skipped when the
 * "nocmdq" parameter is set; on cmdq_pltfm_init() failure the host
 * silently falls back to legacy (non-CQ) operation with cq_host = NULL.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	if (nocmdq) {
		dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
		return;
	}

	host->cq_host = cmdq_pltfm_init(pdev);
	if (IS_ERR(host->cq_host)) {
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
		host->cq_host = NULL;
	} else {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
	}
}
#else
/* CONFIG_MMC_CQ_HCI disabled: command queueing is never enabled. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{

}
#endif
4596
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004597static bool sdhci_msm_is_bootdevice(struct device *dev)
4598{
4599 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4600 strlen(saved_command_line))) {
4601 char search_string[50];
4602
4603 snprintf(search_string, ARRAY_SIZE(search_string),
4604 "androidboot.bootdevice=%s", dev_name(dev));
4605 if (strnstr(saved_command_line, search_string,
4606 strlen(saved_command_line)))
4607 return true;
4608 else
4609 return false;
4610 }
4611
4612 /*
4613 * "androidboot.bootdevice=" argument is not present then
4614 * return true as we don't know the boot device anyways.
4615 */
4616 return true;
4617}
4618
Asutosh Das0ef24812012-12-18 16:14:02 +05304619static int sdhci_msm_probe(struct platform_device *pdev)
4620{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304621 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304622 struct sdhci_host *host;
4623 struct sdhci_pltfm_host *pltfm_host;
4624 struct sdhci_msm_host *msm_host;
4625 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004626 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004627 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004628 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304629 struct resource *tlmm_memres = NULL;
4630 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304631 unsigned long flags;
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004632 bool force_probe;
Asutosh Das0ef24812012-12-18 16:14:02 +05304633
4634 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4635 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4636 GFP_KERNEL);
4637 if (!msm_host) {
4638 ret = -ENOMEM;
4639 goto out;
4640 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304641
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304642 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4643 msm_host->mci_removed = true;
4644 msm_host->offset = &sdhci_msm_offset_mci_removed;
4645 } else {
4646 msm_host->mci_removed = false;
4647 msm_host->offset = &sdhci_msm_offset_mci_present;
4648 }
4649 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304650 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4651 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4652 if (IS_ERR(host)) {
4653 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304654 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304655 }
4656
4657 pltfm_host = sdhci_priv(host);
4658 pltfm_host->priv = msm_host;
4659 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304660 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304661
Asutosh Das1c43b132018-01-11 18:08:40 +05304662 ret = sdhci_msm_get_socrev(&pdev->dev, msm_host);
4663 if (ret == -EPROBE_DEFER) {
4664 dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
4665 goto pltfm_free;
4666 }
4667
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304668 /* get the ice device vops if present */
4669 ret = sdhci_msm_ice_get_dev(host);
4670 if (ret == -EPROBE_DEFER) {
4671 /*
4672 * SDHCI driver might be probed before ICE driver does.
4673 * In that case we would like to return EPROBE_DEFER code
4674 * in order to delay its probing.
4675 */
4676 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4677 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004678 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304679
4680 } else if (ret == -ENODEV) {
4681 /*
4682 * ICE device is not enabled in DTS file. No need for further
4683 * initialization of ICE driver.
4684 */
4685 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4686 __func__);
4687 } else if (ret) {
4688 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4689 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004690 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304691 }
4692
Asutosh Das0ef24812012-12-18 16:14:02 +05304693 /* Extract platform data */
4694 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004695 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304696 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004697 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4698 ret);
4699 goto pltfm_free;
4700 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004701
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004702 /* Read property to determine if the probe is forced */
4703 force_probe = of_find_property(pdev->dev.of_node,
4704 "qcom,force-sdhc1-probe", NULL);
4705
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004706 /* skip the probe if eMMC isn't a boot device */
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004707 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
4708 && !force_probe) {
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004709 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004710 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004711 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004712
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004713 if (disable_slots & (1 << (ret - 1))) {
4714 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4715 ret);
4716 ret = -ENODEV;
4717 goto pltfm_free;
4718 }
4719
Sayali Lokhande5f768322016-04-11 18:36:53 +05304720 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004721 sdhci_slot[ret-1] = msm_host;
4722
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004723 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4724 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304725 if (!msm_host->pdata) {
4726 dev_err(&pdev->dev, "DT parsing error\n");
4727 goto pltfm_free;
4728 }
4729 } else {
4730 dev_err(&pdev->dev, "No device tree node\n");
4731 goto pltfm_free;
4732 }
4733
4734 /* Setup Clocks */
4735
4736 /* Setup SDCC bus voter clock. */
4737 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4738 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4739 /* Vote for max. clk rate for max. performance */
4740 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4741 if (ret)
4742 goto pltfm_free;
4743 ret = clk_prepare_enable(msm_host->bus_clk);
4744 if (ret)
4745 goto pltfm_free;
4746 }
4747
4748 /* Setup main peripheral bus clock */
4749 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4750 if (!IS_ERR(msm_host->pclk)) {
4751 ret = clk_prepare_enable(msm_host->pclk);
4752 if (ret)
4753 goto bus_clk_disable;
4754 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304755 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304756
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304757 /* Setup SDC ufs bus aggr clock */
4758 msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
4759 if (!IS_ERR(msm_host->bus_aggr_clk)) {
4760 ret = clk_prepare_enable(msm_host->bus_aggr_clk);
4761 if (ret) {
4762 dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
4763 goto pclk_disable;
4764 }
4765 }
4766
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304767 if (msm_host->ice.pdev) {
4768 /* Setup SDC ICE clock */
4769 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4770 if (!IS_ERR(msm_host->ice_clk)) {
4771 /* ICE core has only one clock frequency for now */
4772 ret = clk_set_rate(msm_host->ice_clk,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304773 msm_host->pdata->ice_clk_max);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304774 if (ret) {
4775 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4776 ret,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304777 msm_host->pdata->ice_clk_max);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304778 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304779 }
4780 ret = clk_prepare_enable(msm_host->ice_clk);
4781 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304782 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304783
4784 msm_host->ice_clk_rate =
Sahitya Tummala073ca552015-08-06 13:59:37 +05304785 msm_host->pdata->ice_clk_max;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304786 }
4787 }
4788
Asutosh Das0ef24812012-12-18 16:14:02 +05304789 /* Setup SDC MMC clock */
4790 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4791 if (IS_ERR(msm_host->clk)) {
4792 ret = PTR_ERR(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304793 goto bus_aggr_clk_disable;
Asutosh Das0ef24812012-12-18 16:14:02 +05304794 }
4795
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304796 /* Set to the minimum supported clock frequency */
4797 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4798 if (ret) {
4799 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304800 goto bus_aggr_clk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304801 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304802 ret = clk_prepare_enable(msm_host->clk);
4803 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304804 goto bus_aggr_clk_disable;
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304805
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304806 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304807 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304808
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004809 /* Setup CDC calibration fixed feedback clock */
4810 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4811 if (!IS_ERR(msm_host->ff_clk)) {
4812 ret = clk_prepare_enable(msm_host->ff_clk);
4813 if (ret)
4814 goto clk_disable;
4815 }
4816
4817 /* Setup CDC calibration sleep clock */
4818 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4819 if (!IS_ERR(msm_host->sleep_clk)) {
4820 ret = clk_prepare_enable(msm_host->sleep_clk);
4821 if (ret)
4822 goto ff_clk_disable;
4823 }
4824
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004825 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4826
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304827 ret = sdhci_msm_bus_register(msm_host, pdev);
4828 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004829 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304830
4831 if (msm_host->msm_bus_vote.client_handle)
4832 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4833 sdhci_msm_bus_work);
4834 sdhci_msm_bus_voting(host, 1);
4835
Asutosh Das0ef24812012-12-18 16:14:02 +05304836 /* Setup regulators */
4837 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4838 if (ret) {
4839 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304840 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304841 }
4842
4843 /* Reset the core and Enable SDHC mode */
4844 core_memres = platform_get_resource_byname(pdev,
4845 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304846 if (!msm_host->mci_removed) {
4847 if (!core_memres) {
4848 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4849 goto vreg_deinit;
4850 }
4851 msm_host->core_mem = devm_ioremap(&pdev->dev,
4852 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304853
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304854 if (!msm_host->core_mem) {
4855 dev_err(&pdev->dev, "Failed to remap registers\n");
4856 ret = -ENOMEM;
4857 goto vreg_deinit;
4858 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304859 }
4860
Sahitya Tummala079ed852015-10-29 20:18:45 +05304861 tlmm_memres = platform_get_resource_byname(pdev,
4862 IORESOURCE_MEM, "tlmm_mem");
4863 if (tlmm_memres) {
4864 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4865 resource_size(tlmm_memres));
4866
4867 if (!tlmm_mem) {
4868 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4869 ret = -ENOMEM;
4870 goto vreg_deinit;
4871 }
4872 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
Sahitya Tummala079ed852015-10-29 20:18:45 +05304873 }
4874
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304875 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004876 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304877 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004878 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304879 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304880
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +05304881 /*
4882 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
4883 */
4884 writel_relaxed((readl_relaxed(host->ioaddr +
4885 msm_host_offset->CORE_VENDOR_SPEC3) &
4886 ~CORE_FIFO_ALT_EN), host->ioaddr +
4887 msm_host_offset->CORE_VENDOR_SPEC3);
4888
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304889 if (!msm_host->mci_removed) {
4890 /* Set HC_MODE_EN bit in HC_MODE register */
4891 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304892
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304893 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4894 writel_relaxed(readl_relaxed(msm_host->core_mem +
4895 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4896 msm_host->core_mem + CORE_HC_MODE);
4897 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304898 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004899
4900 /*
4901 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
4902 * be used as required later on.
4903 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304904 writel_relaxed((readl_relaxed(host->ioaddr +
4905 msm_host_offset->CORE_VENDOR_SPEC) |
4906 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4907 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304908 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304909 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4910 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4911 * interrupt in GIC (by registering the interrupt handler), we need to
4912 * ensure that any pending power irq interrupt status is acknowledged
4913 * otherwise power irq interrupt handler would be fired prematurely.
4914 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304915 irq_status = sdhci_msm_readl_relaxed(host,
4916 msm_host_offset->CORE_PWRCTL_STATUS);
4917 sdhci_msm_writel_relaxed(irq_status, host,
4918 msm_host_offset->CORE_PWRCTL_CLEAR);
4919 irq_ctl = sdhci_msm_readl_relaxed(host,
4920 msm_host_offset->CORE_PWRCTL_CTL);
4921
Subhash Jadavani28137342013-05-14 17:46:43 +05304922 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4923 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4924 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4925 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304926 sdhci_msm_writel_relaxed(irq_ctl, host,
4927 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004928
Subhash Jadavani28137342013-05-14 17:46:43 +05304929 /*
4930 * Ensure that above writes are propogated before interrupt enablement
4931 * in GIC.
4932 */
4933 mb();
4934
4935 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304936 * Following are the deviations from SDHC spec v3.0 -
4937 * 1. Card detection is handled using separate GPIO.
4938 * 2. Bus power control is handled by interacting with PMIC.
4939 */
4940 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4941 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304942 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004943 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304944 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304945 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304946 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304947 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304948 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304949 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304950
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304951 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4952 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4953
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004954 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004955 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
4956 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
4957 SDHCI_VENDOR_VER_SHIFT));
4958 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
4959 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
4960 /*
4961 * Add 40us delay in interrupt handler when
4962 * operating at initialization frequency(400KHz).
4963 */
4964 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
4965 /*
4966 * Set Software Reset for DAT line in Software
4967 * Reset Register (Bit 2).
4968 */
4969 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
4970 }
4971
Asutosh Das214b9662013-06-13 14:27:42 +05304972 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
4973
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004974 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004975 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4976 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304977 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004978 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304979 goto vreg_deinit;
4980 }
Subhash Jadavanide139e82017-09-27 11:04:40 +05304981
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004982 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304983 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004984 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304985 if (ret) {
4986 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004987 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304988 goto vreg_deinit;
4989 }
4990
4991 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304992 sdhci_msm_writel_relaxed(INT_MASK, host,
4993 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05304994
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304995#ifdef CONFIG_MMC_CLKGATE
4996 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4997 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4998#endif
4999
Asutosh Das0ef24812012-12-18 16:14:02 +05305000 /* Set host capabilities */
5001 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
5002 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005003 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05305004 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05305005 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005006 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005007 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03005008 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05305009 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07005010 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03005011 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305012 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05305013
5014 if (msm_host->pdata->nonremovable)
5015 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
5016
Guoping Yuf7c91332014-08-20 16:56:18 +08005017 if (msm_host->pdata->nonhotplug)
5018 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
5019
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07005020 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
5021
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305022 /* Initialize ICE if present */
5023 if (msm_host->ice.pdev) {
5024 ret = sdhci_msm_ice_init(host);
5025 if (ret) {
5026 dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
5027 mmc_hostname(host->mmc), ret);
5028 ret = -EINVAL;
5029 goto vreg_deinit;
5030 }
5031 host->is_crypto_en = true;
5032 /* Packed commands cannot be encrypted/decrypted using ICE */
5033 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
5034 MMC_CAP2_PACKED_WR_CONTROL);
5035 }
5036
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05305037 init_completion(&msm_host->pwr_irq_completion);
5038
Sahitya Tummala581df132013-03-12 14:57:46 +05305039 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05305040 /*
5041 * Set up the card detect GPIO in active configuration before
5042 * configuring it as an IRQ. Otherwise, it can be in some
5043 * weird/inconsistent state resulting in flood of interrupts.
5044 */
5045 sdhci_msm_setup_pins(msm_host->pdata, true);
5046
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05305047 /*
5048 * This delay is needed for stabilizing the card detect GPIO
5049 * line after changing the pull configs.
5050 */
5051 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05305052 ret = mmc_gpio_request_cd(msm_host->mmc,
5053 msm_host->pdata->status_gpio, 0);
5054 if (ret) {
5055 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
5056 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305057 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05305058 }
5059 }
5060
Krishna Konda7feab352013-09-17 23:55:40 -07005061 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
5062 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
5063 host->dma_mask = DMA_BIT_MASK(64);
5064 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305065 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07005066 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305067 host->dma_mask = DMA_BIT_MASK(32);
5068 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305069 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305070 } else {
5071 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
5072 }
5073
Ritesh Harjani42876f42015-11-17 17:46:51 +05305074 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
5075 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05305076 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305077 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
5078 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05305079 msm_host->is_sdiowakeup_enabled = true;
5080 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
5081 sdhci_msm_sdiowakeup_irq,
5082 IRQF_SHARED | IRQF_TRIGGER_HIGH,
5083 "sdhci-msm sdiowakeup", host);
5084 if (ret) {
5085 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
5086 __func__, msm_host->pdata->sdiowakeup_irq, ret);
5087 msm_host->pdata->sdiowakeup_irq = -1;
5088 msm_host->is_sdiowakeup_enabled = false;
5089 goto vreg_deinit;
5090 } else {
5091 spin_lock_irqsave(&host->lock, flags);
5092 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305093 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305094 spin_unlock_irqrestore(&host->lock, flags);
5095 }
5096 }
5097
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07005098 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305099 ret = sdhci_add_host(host);
5100 if (ret) {
5101 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05305102 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05305103 }
5104
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05305105 msm_host->pltfm_init_done = true;
5106
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005107 pm_runtime_set_active(&pdev->dev);
5108 pm_runtime_enable(&pdev->dev);
5109 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
5110 pm_runtime_use_autosuspend(&pdev->dev);
5111
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305112 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
5113 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
5114 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
5115 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
5116 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
5117 ret = device_create_file(&pdev->dev,
5118 &msm_host->msm_bus_vote.max_bus_bw);
5119 if (ret)
5120 goto remove_host;
5121
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305122 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
5123 msm_host->polling.show = show_polling;
5124 msm_host->polling.store = store_polling;
5125 sysfs_attr_init(&msm_host->polling.attr);
5126 msm_host->polling.attr.name = "polling";
5127 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
5128 ret = device_create_file(&pdev->dev, &msm_host->polling);
5129 if (ret)
5130 goto remove_max_bus_bw_file;
5131 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05305132
5133 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
5134 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
5135 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
5136 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
5137 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
5138 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5139 if (ret) {
5140 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
5141 mmc_hostname(host->mmc), __func__, ret);
5142 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5143 }
Ankit Jain1d7e5182017-09-20 11:55:38 +05305144 if (sdhci_msm_is_bootdevice(&pdev->dev))
5145 mmc_flush_detect_work(host->mmc);
5146
Asutosh Das0ef24812012-12-18 16:14:02 +05305147 /* Successful initialization */
5148 goto out;
5149
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305150remove_max_bus_bw_file:
5151 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05305152remove_host:
5153 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005154 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305155 sdhci_remove_host(host, dead);
5156vreg_deinit:
5157 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305158bus_unregister:
5159 if (msm_host->msm_bus_vote.client_handle)
5160 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5161 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07005162sleep_clk_disable:
5163 if (!IS_ERR(msm_host->sleep_clk))
5164 clk_disable_unprepare(msm_host->sleep_clk);
5165ff_clk_disable:
5166 if (!IS_ERR(msm_host->ff_clk))
5167 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305168clk_disable:
5169 if (!IS_ERR(msm_host->clk))
5170 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05305171bus_aggr_clk_disable:
5172 if (!IS_ERR(msm_host->bus_aggr_clk))
5173 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305174pclk_disable:
5175 if (!IS_ERR(msm_host->pclk))
5176 clk_disable_unprepare(msm_host->pclk);
5177bus_clk_disable:
5178 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
5179 clk_disable_unprepare(msm_host->bus_clk);
5180pltfm_free:
5181 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305182out_host_free:
5183 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305184out:
5185 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
5186 return ret;
5187}
5188
5189static int sdhci_msm_remove(struct platform_device *pdev)
5190{
5191 struct sdhci_host *host = platform_get_drvdata(pdev);
5192 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5193 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5194 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
5195 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
5196 0xffffffff);
5197
5198 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305199 if (!gpio_is_valid(msm_host->pdata->status_gpio))
5200 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305201 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005202 pm_runtime_disable(&pdev->dev);
Vijay Viswanath1971d222018-03-01 12:01:47 +05305203
5204 if (msm_host->pm_qos_wq)
5205 destroy_workqueue(msm_host->pm_qos_wq);
Asutosh Das0ef24812012-12-18 16:14:02 +05305206 sdhci_remove_host(host, dead);
5207 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05305208
Asutosh Das0ef24812012-12-18 16:14:02 +05305209 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305210
Pratibhasagar V9acf2642013-11-21 21:07:21 +05305211 sdhci_msm_setup_pins(pdata, true);
5212 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305213
5214 if (msm_host->msm_bus_vote.client_handle) {
5215 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5216 sdhci_msm_bus_unregister(msm_host);
5217 }
Asutosh Das0ef24812012-12-18 16:14:02 +05305218 return 0;
5219}
5220
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005221#ifdef CONFIG_PM
Ritesh Harjani42876f42015-11-17 17:46:51 +05305222static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
5223{
5224 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5225 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5226 unsigned long flags;
5227 int ret = 0;
5228
5229 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
5230 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
5231 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305232 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305233 return 1;
5234 }
5235
5236 spin_lock_irqsave(&host->lock, flags);
5237 if (enable) {
5238 /* configure DAT1 gpio if applicable */
5239 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305240 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305241 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5242 if (!ret)
5243 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
5244 goto out;
5245 } else {
5246 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5247 mmc_hostname(host->mmc), enable);
5248 }
5249 } else {
5250 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
5251 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5252 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305253 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305254 } else {
5255 pr_err("%s: sdiowakeup_irq(%d)invalid\n",
5256 mmc_hostname(host->mmc), enable);
5257
5258 }
5259 }
5260out:
5261 if (ret)
5262 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
5263 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
5264 ret, msm_host->pdata->sdiowakeup_irq);
5265 spin_unlock_irqrestore(&host->lock, flags);
5266 return ret;
5267}
5268
5269
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005270static int sdhci_msm_runtime_suspend(struct device *dev)
5271{
5272 struct sdhci_host *host = dev_get_drvdata(dev);
5273 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5274 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02005275 ktime_t start = ktime_get();
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305276 int ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005277
Ritesh Harjani42876f42015-11-17 17:46:51 +05305278 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5279 goto defer_disable_host_irq;
Pavan Anamula45ef1372015-10-29 23:22:12 +05305280
Ritesh Harjani42876f42015-11-17 17:46:51 +05305281 sdhci_cfg_irq(host, false, true);
5282
5283defer_disable_host_irq:
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005284 disable_irq(msm_host->pwr_irq);
5285
5286 /*
5287 * Remove the vote immediately only if clocks are off in which
5288 * case we might have queued work to remove vote but it may not
5289 * be completed before runtime suspend or system suspend.
5290 */
5291 if (!atomic_read(&msm_host->clks_on)) {
5292 if (msm_host->msm_bus_vote.client_handle)
5293 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5294 }
5295
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305296 if (host->is_crypto_en) {
5297 ret = sdhci_msm_ice_suspend(host);
5298 if (ret < 0)
5299 pr_err("%s: failed to suspend crypto engine %d\n",
5300 mmc_hostname(host->mmc), ret);
5301 }
Konstantin Dorfman98edaa12015-06-11 10:05:18 +03005302 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
5303 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005304 return 0;
5305}
5306
/*
 * sdhci_msm_runtime_resume - runtime PM resume callback
 *
 * Mirror of sdhci_msm_runtime_suspend(): resumes the inline crypto engine
 * (after re-enabling the controller clock) when in use, re-enables the
 * host irq (skipped for SDIO cards, whose irq was never disabled), then
 * the power irq.  Always returns 0; clock/ICE failures are only logged
 * and resume of the irqs proceeds regardless.
 */
static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	ktime_t start = ktime_get();
	int ret;

	if (host->is_crypto_en) {
		/* ICE needs the controller clock up before it can resume */
		ret = sdhci_msm_enable_controller_clock(host);
		if (ret) {
			pr_err("%s: Failed to enable reqd clocks\n",
				mmc_hostname(host->mmc));
			goto skip_ice_resume;
		}
		ret = sdhci_msm_ice_resume(host);
		if (ret)
			pr_err("%s: failed to resume crypto engine %d\n",
				mmc_hostname(host->mmc), ret);
	}
skip_ice_resume:

	/* SDIO: host irq was left enabled during suspend */
	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		goto defer_enable_host_irq;

	sdhci_cfg_irq(host, true, true);

defer_enable_host_irq:
	enable_irq(msm_host->pwr_irq);

	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}
5341
/*
 * sdhci_msm_suspend - system (late) suspend callback
 *
 * Masks the card-detect irq, performs a runtime suspend unless the device
 * is already runtime suspended, gates the controller clock, and for SDIO
 * cards arms the DAT1 GPIO wakeup irq; if the wakeup irq is not
 * applicable (sdhci_msm_cfg_sdio_wakeup() returned non-zero) the host irq
 * is disabled instead.
 */
static int sdhci_msm_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
		(msm_host->mmc->slot.cd_irq >= 0))
			disable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: already runtime suspended\n",
		mmc_hostname(host->mmc), __func__);
		goto out;
	}
	ret = sdhci_msm_runtime_suspend(dev);
out:
	/* Gate the controller clock even on the already-suspended path */
	sdhci_msm_disable_controller_clock(host);
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
		if (sdio_cfg)
			sdhci_cfg_irq(host, false, true);
	}

	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}
5373
/*
 * sdhci_msm_resume - system suspend's resume counterpart
 *
 * Unmasks the card-detect irq, performs a runtime resume unless the
 * device is runtime suspended (in which case resume is deferred to the
 * runtime PM path), and for SDIO cards disarms the DAT1 GPIO wakeup irq;
 * if the wakeup irq was not applicable the host irq is re-enabled
 * instead, mirroring sdhci_msm_suspend().
 */
static int sdhci_msm_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
		(msm_host->mmc->slot.cd_irq >= 0))
			enable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: runtime suspended, defer system resume\n",
		mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = sdhci_msm_runtime_resume(dev);
out:
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
		if (sdio_cfg)
			sdhci_cfg_irq(host, true, true);
	}

	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}
5405
Ritesh Harjani42876f42015-11-17 17:46:51 +05305406static int sdhci_msm_suspend_noirq(struct device *dev)
5407{
5408 struct sdhci_host *host = dev_get_drvdata(dev);
5409 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5410 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5411 int ret = 0;
5412
5413 /*
5414 * ksdioirqd may be running, hence retry
5415 * suspend in case the clocks are ON
5416 */
5417 if (atomic_read(&msm_host->clks_on)) {
5418 pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
5419 mmc_hostname(host->mmc), __func__);
5420 ret = -EAGAIN;
5421 }
5422
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305423 if (host->mmc->card && mmc_card_sdio(host->mmc->card))
5424 if (msm_host->sdio_pending_processing)
5425 ret = -EBUSY;
5426
Ritesh Harjani42876f42015-11-17 17:46:51 +05305427 return ret;
5428}
5429
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005430static const struct dev_pm_ops sdhci_msm_pmops = {
Vijay Viswanathd8936f82017-07-20 15:50:19 +05305431 SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005432 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
5433 NULL)
Ritesh Harjani42876f42015-11-17 17:46:51 +05305434 .suspend_noirq = sdhci_msm_suspend_noirq,
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005435};
5436
5437#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
5438
5439#else
5440#define SDHCI_MSM_PMOPS NULL
5441#endif
Asutosh Das0ef24812012-12-18 16:14:02 +05305442static const struct of_device_id sdhci_msm_dt_match[] = {
5443 {.compatible = "qcom,sdhci-msm"},
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305444 {.compatible = "qcom,sdhci-msm-v5"},
Venkat Gopalakrishnan272ba402015-06-25 12:00:02 -07005445 {},
Asutosh Das0ef24812012-12-18 16:14:02 +05305446};
5447MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
5448
/*
 * Platform driver glue.  SDHCI_MSM_PMOPS compiles out to NULL when PM
 * support is disabled (see the preceding #else branch).
 */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,
	},
};
5460
/* Module init/exit boilerplate: register/unregister the platform driver. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");