/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pinctrl/consumer.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <linux/nvmem-consumer.h>
#include <trace/events/mmc.h>

#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define SDHCI_VER_100		0x2B

#define CORE_VERSION_STEP_MASK		0x0000FFFF
#define CORE_VERSION_MINOR_MASK		0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT	16
#define CORE_VERSION_MAJOR_MASK		0xF0000000
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_TARGET_MASK	0x000000FF
#define SDHCI_MSM_VER_420		0x49

#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_LOCK		(1 << 7)
#define CORE_DDR_DLL_LOCK	(1 << 11)

#define CORE_CLK_PWRSAVE		(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
#define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
#define CORE_IO_PAD_PWR_SWITCH		(1 << 16)
#define CORE_HC_SELECT_IN_EN		(1 << 18)
#define CORE_HC_SELECT_IN_HS400		(6 << 19)
#define CORE_HC_SELECT_IN_MASK		(7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL	0xA1C

#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
#define HC_SW_RST_REQ		(1 << 21)
#define CORE_ONE_MID_EN		(1 << 25)

#define CORE_8_BIT_SUPPORT		(1 << 18)
#define CORE_3_3V_SUPPORT		(1 << 24)
#define CORE_3_0V_SUPPORT		(1 << 25)
#define CORE_1_8V_SUPPORT		(1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CQ_CMD_DBG_RAM			0x110
#define CQ_CMD_DBG_RAM_WA		0x150
#define CQ_CMD_DBG_RAM_OL		0x154

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_CMDIN_RCLK_EN		(1 << 1)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_PWRSAVE_DLL		(1 << 3)
#define CORE_FIFO_ALT_EN		(1 << 10)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT	(1 << 13)

#define CORE_DDR_CAL_EN			(1 << 0)
#define CORE_FLL_CYCLE_CNT		(1 << 18)
#define CORE_DLL_CLOCK_DISABLE		(1 << 21)

#define DDR_CONFIG_POR_VAL		0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY		115
#define DDR_CONFIG_2_POR_VAL		0x80040873
#define DLL_USR_CTL_POR_VAL		0x10800
#define ENABLE_DLL_LOCK_STATUS		(1 << 26)

/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
#define TCXO_FREQ		19200000

#define INVALID_TUNING_PHASE	-1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)

#define NUM_TUNING_PHASES	16
#define MAX_DRV_TYPES_SUPPORTED_HS200	4
#define MSM_AUTOSUSPEND_DELAY_MS	100

#define RCLK_TOGGLE		0x2

struct sdhci_msm_offset {
	u32 CORE_MCI_DATA_CNT;
	u32 CORE_MCI_STATUS;
	u32 CORE_MCI_FIFO_CNT;
	u32 CORE_MCI_VERSION;
	u32 CORE_GENERICS;
	u32 CORE_TESTBUS_CONFIG;
	u32 CORE_TESTBUS_SEL2_BIT;
	u32 CORE_TESTBUS_ENA;
	u32 CORE_TESTBUS_SEL2;
	u32 CORE_PWRCTL_STATUS;
	u32 CORE_PWRCTL_MASK;
	u32 CORE_PWRCTL_CLEAR;
	u32 CORE_PWRCTL_CTL;
	u32 CORE_SDCC_DEBUG_REG;
	u32 CORE_DLL_CONFIG;
	u32 CORE_DLL_STATUS;
	u32 CORE_VENDOR_SPEC;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
	u32 CORE_VENDOR_SPEC_FUNC2;
	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
	u32 CORE_DDR_200_CFG;
	u32 CORE_VENDOR_SPEC3;
	u32 CORE_DLL_CONFIG_2;
	u32 CORE_DDR_CONFIG;
	u32 CORE_DDR_CONFIG_2;
	u32 CORE_DLL_USR_CTL; /* Present on SDCC5.1 onwards */
};

struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
	.CORE_MCI_DATA_CNT = 0x35C,
	.CORE_MCI_STATUS = 0x324,
	.CORE_MCI_FIFO_CNT = 0x308,
	.CORE_MCI_VERSION = 0x318,
	.CORE_GENERICS = 0x320,
	.CORE_TESTBUS_CONFIG = 0x32C,
	.CORE_TESTBUS_SEL2_BIT = 3,
	.CORE_TESTBUS_ENA = (1 << 31),
	.CORE_TESTBUS_SEL2 = (1 << 3),
	.CORE_PWRCTL_STATUS = 0x240,
	.CORE_PWRCTL_MASK = 0x244,
	.CORE_PWRCTL_CLEAR = 0x248,
	.CORE_PWRCTL_CTL = 0x24C,
	.CORE_SDCC_DEBUG_REG = 0x358,
	.CORE_DLL_CONFIG = 0x200,
	.CORE_DLL_STATUS = 0x208,
	.CORE_VENDOR_SPEC = 0x20C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
	.CORE_DDR_200_CFG = 0x224,
	.CORE_VENDOR_SPEC3 = 0x250,
	.CORE_DLL_CONFIG_2 = 0x254,
	.CORE_DDR_CONFIG = 0x258,
	.CORE_DDR_CONFIG_2 = 0x25C,
	.CORE_DLL_USR_CTL = 0x388,
};

struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
	.CORE_MCI_DATA_CNT = 0x30,
	.CORE_MCI_STATUS = 0x34,
	.CORE_MCI_FIFO_CNT = 0x44,
	.CORE_MCI_VERSION = 0x050,
	.CORE_GENERICS = 0x70,
	.CORE_TESTBUS_CONFIG = 0x0CC,
	.CORE_TESTBUS_SEL2_BIT = 4,
	.CORE_TESTBUS_ENA = (1 << 3),
	.CORE_TESTBUS_SEL2 = (1 << 4),
	.CORE_PWRCTL_STATUS = 0xDC,
	.CORE_PWRCTL_MASK = 0xE0,
	.CORE_PWRCTL_CLEAR = 0xE4,
	.CORE_PWRCTL_CTL = 0xE8,
	.CORE_SDCC_DEBUG_REG = 0x124,
	.CORE_DLL_CONFIG = 0x100,
	.CORE_DLL_STATUS = 0x108,
	.CORE_VENDOR_SPEC = 0x10C,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
	.CORE_DDR_200_CFG = 0x184,
	.CORE_VENDOR_SPEC3 = 0x1B0,
	.CORE_DLL_CONFIG_2 = 0x1B4,
	.CORE_DDR_CONFIG = 0x1B8,
	.CORE_DDR_CONFIG_2 = 0x1BC,
};

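/*
 * Register access helpers: on controllers where the legacy MCI register
 * space has been removed, the core registers live in the standard SDHCI
 * region (host->ioaddr); otherwise they are accessed through the separate
 * core_mem mapping. Callers pass a register offset, typically taken from
 * the offset tables above.
 */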
u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readb_relaxed(base_addr + offset);
}

u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	return readl_relaxed(base_addr + offset);
}

void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writeb_relaxed(val, base_addr + offset);
}

void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	void __iomem *base_addr;

	if (msm_host->mci_removed)
		base_addr = host->ioaddr;
	else
		base_addr = msm_host->core_mem;

	writel_relaxed(val, base_addr + offset);
}

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

static bool nocmdq;
module_param(nocmdq, bool, S_IRUGO|S_IWUSR);

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever is specified in the voltage_level (third) argument
	 * of sdhci_msm_set_vdd_io_vol()
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

/*
 * Enable CDR to track changes of DAT lines and adjust sampling
 * point according to voltage/temperature variations
 */
static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
	int rc = 0;
	u32 config;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err;

	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err;
	goto out;
err:
	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
out:
	return rc;
}

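/*
 * sysfs show/store handlers that toggle msm_host->en_auto_cmd21, which
 * controls whether the controller's hardware auto-CMD21 assist is used
 * during HS200 tuning (see sdhci_msm_config_auto_tuning_cmd() below).
 */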
static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
				*attr, const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 tmp;
	unsigned long flags;

	if (!kstrtou32(buf, 0, &tmp)) {
		spin_lock_irqsave(&host->lock, flags);
		msm_host->en_auto_cmd21 = !!tmp;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	return count;
}

static ssize_t show_auto_cmd21(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
}

/* MSM auto-tuning handler */
static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
					    bool enable,
					    u32 type)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 val = 0;

	if (!msm_host->en_auto_cmd21)
		return 0;

	if (type == MMC_SEND_TUNING_BLOCK_HS200)
		val = CORE_HC_AUTO_CMD21_EN;
	else
		return 0;

	if (enable) {
		rc = msm_enable_cdr_cm_sdc4_dll(host);
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) | val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	} else {
		writel_relaxed(readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
	}
	return rc;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */
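/*
 * Hypothetical example: if phases 0-3 and 11-15 pass, the two windows wrap
 * around phase 15 and are merged into a single 9-phase window
 * (11,12,13,14,15,0,1,2,3); taking the element at 3/4 of that window
 * (index (9 * 3 / 4) - 1 = 5) selects phase 0.
 */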

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if the next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between the 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If the 2 valid windows form a cycle then merge them as a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, then the
			 * total number of phases in both windows should not
			 * be more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge the 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 208000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC)
			& ~CORE_CLK_PWRSAVE), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG)
			& ~CORE_CK_OUT_EN), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		if ((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~(0xFF << 10)) | (mclk_freq << 10)),
			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2)
			& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
			msm_host_offset->CORE_DLL_CONFIG_2);
	}

	/*
	 * Configure the DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->need_dll_user_ctl) {
		writel_relaxed(DLL_USR_CTL_POR_VAL | ENABLE_DLL_LOCK_STATUS,
			host->ioaddr + msm_host_offset->CORE_DLL_USR_CTL);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CK_OUT_EN), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

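/*
 * CDCLP533 (calibrated delay circuit) hardware calibration sequence used
 * for HS400 mode on controllers that have this block
 * (msm_host->use_cdclp533).
 */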
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_CDC_T4_DLY_SEL),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		& ~CORE_CDC_SWITCH_BYPASS_OFF),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
		| CORE_CDC_SWITCH_RC_EN),
		host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		& ~CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		& ~CORE_SW_TRIG_FULL_CALIB),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
		| CORE_HW_AUTOCAL_ENA),
		host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
		host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DDR_200_CFG)
		| CORE_START_CDC_TRAFFIC),
		host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

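/*
 * CM_DLL_SDC4 calibration path for HS400 mode, used when the controller
 * does not have the CDCLP533 block (see sdhci_msm_hs400_dll_calibration()).
 */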
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogram the value in case it might have been modified by the
	 * bootloader.
	 */
	if (msm_host->pdata->rclk_wa) {
		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG_2);
	} else {
		ddr_config = DDR_CONFIG_POR_VAL &
				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr +
			msm_host_offset->CORE_DDR_CONFIG);
	}

	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG)
			| CORE_CMDIN_RCLK_EN), host->ioaddr +
			msm_host_offset->CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG_2)
		| CORE_DDR_CAL_EN),
		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr +
		msm_host_offset->CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated off, it must not be gated for less than 0.5us,
	 * and MCLK must be switched on for at least 1us before DATA starts
	 * coming. Controllers with the 14lpp tech DLL cannot guarantee this
	 * requirement, so PWRSAVE_DLL should not be turned on for host
	 * controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3)
			| CORE_PWRSAVE_DLL), host->ioaddr +
			msm_host_offset->CORE_VENDOR_SPEC3);
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
				mmc_hostname(mmc));
		return -EINVAL;
	}

	if (msm_host->calibration_done ||
		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		return 0;
	}

	/*
	 * Reset the tuning block.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	if (!ret)
		msm_host->calibration_done = true;
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	const struct sdhci_msm_offset *msm_host_offset =
					msm_host->offset;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG)
		| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
		msm_host_offset->CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
		__func__, ret);
	return ret;
}

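/*
 * Issue an MMC_SWITCH command that rewrites EXT_CSD HS_TIMING with the
 * requested driver strength in bits [7:4] while keeping the high-speed
 * timing value 0x2; used by the tuning loop below to retry tuning with a
 * different card drive strength.
 */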
static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
		u8 drv_type)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};
	struct mmc_host *mmc = host->mmc;
	u8 val = ((drv_type << 4) | 2);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(EXT_CSD_HS_TIMING << 16) |
		(val << 8) |
		EXT_CSD_CMD_SET_NORMAL;
	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
	/* 1 sec */
	cmd.busy_timeout = 1000 * 1000;

	memset(cmd.resp, 0, sizeof(cmd.resp));
	cmd.retries = 3;

	mrq.cmd = &cmd;
	cmd.data = NULL;

	mmc_wait_for_req(mmc, &mrq);
	pr_debug("%s: %s: set card drive type to %d\n",
			mmc_hostname(mmc), __func__,
			drv_type);
}

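/*
 * Platform tuning entry point: sweeps all 16 DLL output phases with the
 * CMD19/CMD21 tuning pattern, records the phases that return the pattern
 * intact, and programs the DLL with the most appropriate phase. For HS400
 * it instead re-runs the DLL/CDC calibration once tuning has already been
 * done. If every phase passes, the card's drive strength is varied and
 * tuning is retried so that a real failing window can be found.
 */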
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;
	u8 last_good_phase = 0;

	/*
	 * Tuning is required only for SDR104, HS200 and HS400 modes, and
	 * only if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/*
	 * Don't allow re-tuning for CRC errors observed for any commands
	 * that are sent during the tuning sequence itself.
	 */
	if (msm_host->tuning_in_progress)
		return 0;
	msm_host->tuning_in_progress = true;
	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (card && (cmd.error || data.error)) {
			/*
			 * Set the DLL to the last known good phase while
			 * sending the status command, to ensure that the
			 * status command won't fail due to a bad phase.
			 */
			if (tuned_phase_cnt)
				last_good_phase =
					tuned_phases[tuned_phase_cnt-1];
			else if (msm_host->saved_tuning_phase !=
					INVALID_TUNING_PHASE)
				last_good_phase = msm_host->saved_tuning_phase;

			rc = msm_config_cm_dll_phase(host, last_good_phase);
			if (rc)
				goto kfree;

			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
					(R1_CURRENT_STATE(sts_cmd.resp[0])
					!= R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles
					 * for the card to move to the TRANS
					 * state. As the MCLK would be at
					 * least 200MHz for tuning, we need a
					 * delay of at most 0.73us. To be on
					 * the safer side, a 1ms delay is
					 * given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			/* Ignore CRC errors that occurred during tuning */
			if (cmd.error)
				mmc->err_stats[MMC_ERR_CMD_CRC]--;
			else if (data.error)
				mmc->err_stats[MMC_ERR_DAT_CRC]--;
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then it's a problem. So change the
		 * card's drive type to a different value, if supported,
		 * and repeat tuning until at least one phase fails. Then
		 * set the original drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
			tuned_phase_cnt);

		/* set drive type to another value; default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
			mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	msm_host->tuning_in_progress = false;
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

Asutosh Das0ef24812012-12-18 16:14:02 +05301387static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1388{
1389 struct sdhci_msm_gpio_data *curr;
1390 int i, ret = 0;
1391
1392 curr = pdata->pin_data->gpio_data;
1393 for (i = 0; i < curr->size; i++) {
1394 if (!gpio_is_valid(curr->gpio[i].no)) {
1395 ret = -EINVAL;
1396 pr_err("%s: Invalid gpio = %d\n", __func__,
1397 curr->gpio[i].no);
1398 goto free_gpios;
1399 }
1400 if (enable) {
1401 ret = gpio_request(curr->gpio[i].no,
1402 curr->gpio[i].name);
1403 if (ret) {
1404 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1405 __func__, curr->gpio[i].no,
1406 curr->gpio[i].name, ret);
1407 goto free_gpios;
1408 }
1409 curr->gpio[i].is_enabled = true;
1410 } else {
1411 gpio_free(curr->gpio[i].no);
1412 curr->gpio[i].is_enabled = false;
1413 }
1414 }
1415 return ret;
1416
1417free_gpios:
1418 for (i--; i >= 0; i--) {
1419 gpio_free(curr->gpio[i].no);
1420 curr->gpio[i].is_enabled = false;
1421 }
1422 return ret;
1423}
1424
Can Guob903ad82017-10-17 13:22:53 +08001425static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
1426 unsigned int clock)
1427{
1428 int ret = 0;
1429
1430 if (clock > 150000000) {
1431 if (pdata->pctrl_data->pins_drv_type_200MHz)
1432 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1433 pdata->pctrl_data->pins_drv_type_200MHz);
1434 } else if (clock > 75000000) {
1435 if (pdata->pctrl_data->pins_drv_type_100MHz)
1436 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1437 pdata->pctrl_data->pins_drv_type_100MHz);
1438 } else if (clock > 400000) {
1439 if (pdata->pctrl_data->pins_drv_type_50MHz)
1440 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1441 pdata->pctrl_data->pins_drv_type_50MHz);
1442 } else {
1443 if (pdata->pctrl_data->pins_drv_type_400KHz)
1444 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1445 pdata->pctrl_data->pins_drv_type_400KHz);
1446 }
1447
1448 return ret;
1449}
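
/*
 * Quick reference for the clock-rate to drive-strength state mapping
 * implemented above; the thresholds come from the checks in
 * sdhci_msm_config_pinctrl_drv_type() and the state names are the ones
 * looked up in sdhci_msm_parse_pinctrl_info() below:
 *
 *	clock > 150 MHz            -> "ds_200MHz"
 *	75 MHz < clock <= 150 MHz  -> "ds_100MHz"
 *	400 kHz < clock <= 75 MHz  -> "ds_50MHz"
 *	clock <= 400 kHz           -> "ds_400KHz"
 *
 * A state that was not provided in the device tree is simply skipped.
 */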
1450
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301451static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1452 bool enable)
1453{
1454 int ret = 0;
1455
1456 if (enable)
1457 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1458 pdata->pctrl_data->pins_active);
1459 else
1460 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1461 pdata->pctrl_data->pins_sleep);
1462
1463 if (ret < 0)
1464 pr_err("%s state for pinctrl failed with %d\n",
1465 enable ? "Enabling" : "Disabling", ret);
1466
1467 return ret;
1468}
1469
Asutosh Das0ef24812012-12-18 16:14:02 +05301470static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1471{
1472 int ret = 0;
1473
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301474 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301475 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301476 } else if (pdata->pctrl_data) {
1477 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1478 goto out;
1479 } else if (!pdata->pin_data) {
1480 return 0;
1481 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301482
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301483 if (pdata->pin_data->is_gpio)
1484 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301485out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301486 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301487 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301488
1489 return ret;
1490}
1491
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301492static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1493 u32 **out, int *len, u32 size)
1494{
1495 int ret = 0;
1496 struct device_node *np = dev->of_node;
1497 size_t sz;
1498 u32 *arr = NULL;
1499
1500 if (!of_get_property(np, prop_name, len)) {
1501 ret = -EINVAL;
1502 goto out;
1503 }
1504 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001505 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301506 dev_err(dev, "%s invalid size\n", prop_name);
1507 ret = -EINVAL;
1508 goto out;
1509 }
1510
1511 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1512 if (!arr) {
1513 dev_err(dev, "%s failed allocating memory\n", prop_name);
1514 ret = -ENOMEM;
1515 goto out;
1516 }
1517
1518 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1519 if (ret < 0) {
1520 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1521 goto out;
1522 }
1523 *out = arr;
1524out:
1525 if (ret)
1526 *len = 0;
1527 return ret;
1528}
1529
Asutosh Das0ef24812012-12-18 16:14:02 +05301530#define MAX_PROP_SIZE 32
1531static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1532 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1533{
1534 int len, ret = 0;
1535 const __be32 *prop;
1536 char prop_name[MAX_PROP_SIZE];
1537 struct sdhci_msm_reg_data *vreg;
1538 struct device_node *np = dev->of_node;
1539
1540 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1541 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301542 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301543 return ret;
1544 }
1545
1546 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1547 if (!vreg) {
1548 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1549 ret = -ENOMEM;
1550 return ret;
1551 }
1552
1553 vreg->name = vreg_name;
1554
1555 snprintf(prop_name, MAX_PROP_SIZE,
1556 "qcom,%s-always-on", vreg_name);
1557 if (of_get_property(np, prop_name, NULL))
1558 vreg->is_always_on = true;
1559
1560 snprintf(prop_name, MAX_PROP_SIZE,
1561 "qcom,%s-lpm-sup", vreg_name);
1562 if (of_get_property(np, prop_name, NULL))
1563 vreg->lpm_sup = true;
1564
1565 snprintf(prop_name, MAX_PROP_SIZE,
1566 "qcom,%s-voltage-level", vreg_name);
1567 prop = of_get_property(np, prop_name, &len);
1568 if (!prop || (len != (2 * sizeof(__be32)))) {
1569 dev_warn(dev, "%s %s property\n",
1570 prop ? "invalid format" : "no", prop_name);
1571 } else {
1572 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1573 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1574 }
1575
1576 snprintf(prop_name, MAX_PROP_SIZE,
1577 "qcom,%s-current-level", vreg_name);
1578 prop = of_get_property(np, prop_name, &len);
1579 if (!prop || (len != (2 * sizeof(__be32)))) {
1580 dev_warn(dev, "%s %s property\n",
1581 prop ? "invalid format" : "no", prop_name);
1582 } else {
1583 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1584 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1585 }
1586
1587 *vreg_data = vreg;
1588 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1589 vreg->name, vreg->is_always_on ? "always_on," : "",
1590 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1591 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1592
1593 return ret;
1594}
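
/*
 * Illustrative device-tree fragment for the properties parsed above,
 * shown for vreg_name = "vdd-io". The regulator phandle and the
 * voltage/current values are made-up examples, not taken from any
 * particular board DT:
 *
 *	vdd-io-supply = <&sdhc_vdd_io_reg>;
 *	qcom,vdd-io-always-on;
 *	qcom,vdd-io-lpm-sup;
 *	qcom,vdd-io-voltage-level = <1800000 2950000>;    (<low high>, in uV)
 *	qcom,vdd-io-current-level = <200 22000>;          (<lpm hpm>, in uA)
 */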
1595
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301596static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1597 struct sdhci_msm_pltfm_data *pdata)
1598{
1599 struct sdhci_pinctrl_data *pctrl_data;
1600 struct pinctrl *pctrl;
1601 int ret = 0;
1602
1603 /* Try to obtain pinctrl handle */
1604 pctrl = devm_pinctrl_get(dev);
1605 if (IS_ERR(pctrl)) {
1606 ret = PTR_ERR(pctrl);
1607 goto out;
1608 }
1609 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1610 if (!pctrl_data) {
1611 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1612 ret = -ENOMEM;
1613 goto out;
1614 }
1615 pctrl_data->pctrl = pctrl;
1616 /* Look-up and keep the states handy to be used later */
1617 pctrl_data->pins_active = pinctrl_lookup_state(
1618 pctrl_data->pctrl, "active");
1619 if (IS_ERR(pctrl_data->pins_active)) {
1620 ret = PTR_ERR(pctrl_data->pins_active);
1621 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1622 goto out;
1623 }
1624 pctrl_data->pins_sleep = pinctrl_lookup_state(
1625 pctrl_data->pctrl, "sleep");
1626 if (IS_ERR(pctrl_data->pins_sleep)) {
1627 ret = PTR_ERR(pctrl_data->pins_sleep);
1628 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1629 goto out;
1630 }
Can Guob903ad82017-10-17 13:22:53 +08001631
1632 pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
1633 pctrl_data->pctrl, "ds_400KHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301634 if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001635		dev_dbg(dev, "Could not get 400K pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_400KHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301636 pctrl_data->pins_drv_type_400KHz = NULL;
1637 }
Can Guob903ad82017-10-17 13:22:53 +08001638
1639 pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
1640 pctrl_data->pctrl, "ds_50MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301641 if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001642		dev_dbg(dev, "Could not get 50M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_50MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301643 pctrl_data->pins_drv_type_50MHz = NULL;
1644 }
Can Guob903ad82017-10-17 13:22:53 +08001645
1646 pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
1647 pctrl_data->pctrl, "ds_100MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301648 if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001649		dev_dbg(dev, "Could not get 100M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_100MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301650 pctrl_data->pins_drv_type_100MHz = NULL;
1651 }
Can Guob903ad82017-10-17 13:22:53 +08001652
1653 pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
1654 pctrl_data->pctrl, "ds_200MHz");
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301655 if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
Can Guob903ad82017-10-17 13:22:53 +08001656		dev_dbg(dev, "Could not get 200M pinstates, err:%ld\n", PTR_ERR(pctrl_data->pins_drv_type_200MHz));
Veerabhadrarao Badiganti0d5fc282017-10-27 16:23:13 +05301657 pctrl_data->pins_drv_type_200MHz = NULL;
1658 }
Can Guob903ad82017-10-17 13:22:53 +08001659
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301660 pdata->pctrl_data = pctrl_data;
1661out:
1662 return ret;
1663}
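
/*
 * Illustrative pinctrl wiring for the states looked up above. "active"
 * and "sleep" are mandatory, while the ds_* drive-strength states are
 * optional and only used by sdhci_msm_config_pinctrl_drv_type(). The
 * pin-configuration phandles are made-up examples:
 *
 *	pinctrl-names = "active", "sleep", "ds_400KHz",
 *			"ds_50MHz", "ds_100MHz", "ds_200MHz";
 *	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
 *	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 */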
1664
Asutosh Das0ef24812012-12-18 16:14:02 +05301665#define GPIO_NAME_MAX_LEN 32
1666static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1667 struct sdhci_msm_pltfm_data *pdata)
1668{
1669 int ret = 0, cnt, i;
1670 struct sdhci_msm_pin_data *pin_data;
1671 struct device_node *np = dev->of_node;
1672
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301673 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1674 if (!ret) {
1675 goto out;
1676 } else if (ret == -EPROBE_DEFER) {
1677 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1678 goto out;
1679 } else {
1680 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1681 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301682 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301683 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301684 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1685 if (!pin_data) {
1686 dev_err(dev, "No memory for pin_data\n");
1687 ret = -ENOMEM;
1688 goto out;
1689 }
1690
1691 cnt = of_gpio_count(np);
1692 if (cnt > 0) {
1693 pin_data->gpio_data = devm_kzalloc(dev,
1694 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1695 if (!pin_data->gpio_data) {
1696 dev_err(dev, "No memory for gpio_data\n");
1697 ret = -ENOMEM;
1698 goto out;
1699 }
1700 pin_data->gpio_data->size = cnt;
1701 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1702 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1703
1704 if (!pin_data->gpio_data->gpio) {
1705 dev_err(dev, "No memory for gpio\n");
1706 ret = -ENOMEM;
1707 goto out;
1708 }
1709
1710 for (i = 0; i < cnt; i++) {
1711 const char *name = NULL;
1712 char result[GPIO_NAME_MAX_LEN];
1713 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1714 of_property_read_string_index(np,
1715 "qcom,gpio-names", i, &name);
1716
1717 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1718 dev_name(dev), name ? name : "?");
 1719			pin_data->gpio_data->gpio[i].name = devm_kstrdup(dev, result, GFP_KERNEL);
1720 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1721 pin_data->gpio_data->gpio[i].name,
1722 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301723 }
1724 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301725 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301726out:
1727 if (ret)
1728 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1729 return ret;
1730}
1731
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001732#ifdef CONFIG_SMP
1733static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
1734{
1735 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
1736}
1737#else
1738static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
1739#endif
1740
Gilad Bronerc788a672015-09-08 15:39:11 +03001741static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1742 struct sdhci_msm_pltfm_data *pdata)
1743{
1744 struct device_node *np = dev->of_node;
1745 const char *str;
1746 u32 cpu;
1747 int ret = 0;
1748 int i;
1749
1750 pdata->pm_qos_data.irq_valid = false;
1751 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1752 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1753 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001754 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001755 }
1756
1757 /* must specify cpu for "affine_cores" type */
1758 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1759 pdata->pm_qos_data.irq_cpu = -1;
1760 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1761 if (ret) {
1762 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1763 ret);
1764 goto out;
1765 }
1766 if (cpu < 0 || cpu >= num_possible_cpus()) {
1767 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1768 __func__, cpu, num_possible_cpus());
1769 ret = -EINVAL;
1770 goto out;
1771 }
1772 pdata->pm_qos_data.irq_cpu = cpu;
1773 }
1774
1775 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1776 SDHCI_POWER_POLICY_NUM) {
1777 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1778 __func__, SDHCI_POWER_POLICY_NUM);
1779 ret = -EINVAL;
1780 goto out;
1781 }
1782
1783 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1784 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1785 &pdata->pm_qos_data.irq_latency.latency[i]);
1786
1787 pdata->pm_qos_data.irq_valid = true;
1788out:
1789 return ret;
1790}
1791
1792static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1793 struct sdhci_msm_pltfm_data *pdata)
1794{
1795 struct device_node *np = dev->of_node;
1796 u32 mask;
1797 int nr_groups;
1798 int ret;
1799 int i;
1800
1801 /* Read cpu group mapping */
1802 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1803 if (nr_groups <= 0) {
1804 ret = -EINVAL;
1805 goto out;
1806 }
1807 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1808 pdata->pm_qos_data.cpu_group_map.mask =
1809 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1810 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1811 ret = -ENOMEM;
1812 goto out;
1813 }
1814
1815 for (i = 0; i < nr_groups; i++) {
1816 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1817 i, &mask);
1818
1819 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1820 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1821 cpu_possible_mask)) {
1822 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1823 __func__, mask, i);
1824 ret = -EINVAL;
1825 goto free_res;
1826 }
1827 }
1828 return 0;
1829
1830free_res:
1831 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1832out:
1833 return ret;
1834}
1835
1836static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1837 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1838{
1839 struct device_node *np = dev->of_node;
1840 struct sdhci_msm_pm_qos_latency *values;
1841 int ret;
1842 int i;
1843 int group;
1844 int cfg;
1845
1846 ret = of_property_count_u32_elems(np, name);
1847 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1848 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1849 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1850 ret);
1851 return -EINVAL;
1852 } else if (ret < 0) {
1853 return ret;
1854 }
1855
1856 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1857 GFP_KERNEL);
1858 if (!values)
1859 return -ENOMEM;
1860
1861 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1862 group = i / SDHCI_POWER_POLICY_NUM;
1863 cfg = i % SDHCI_POWER_POLICY_NUM;
1864 of_property_read_u32_index(np, name, i,
1865 &(values[group].latency[cfg]));
1866 }
1867
1868 *latency = values;
1869 return 0;
1870}
1871
1872static void sdhci_msm_pm_qos_parse(struct device *dev,
1873 struct sdhci_msm_pltfm_data *pdata)
1874{
1875 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1876 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1877 __func__);
1878
1879 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1880 pdata->pm_qos_data.cmdq_valid =
1881 !sdhci_msm_pm_qos_parse_latency(dev,
1882 "qcom,pm-qos-cmdq-latency-us",
1883 pdata->pm_qos_data.cpu_group_map.nr_groups,
1884 &pdata->pm_qos_data.cmdq_latency);
1885 pdata->pm_qos_data.legacy_valid =
1886 !sdhci_msm_pm_qos_parse_latency(dev,
1887 "qcom,pm-qos-legacy-latency-us",
1888 pdata->pm_qos_data.cpu_group_map.nr_groups,
1889 &pdata->pm_qos_data.latency);
1890 if (!pdata->pm_qos_data.cmdq_valid &&
1891 !pdata->pm_qos_data.legacy_valid) {
1892 /* clean-up previously allocated arrays */
1893 kfree(pdata->pm_qos_data.latency);
1894 kfree(pdata->pm_qos_data.cmdq_latency);
1895 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1896 __func__);
1897 }
1898 } else {
1899 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1900 __func__);
1901 }
1902}
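
/*
 * Illustrative PM QoS device-tree fragment for the parsers above. The
 * CPU masks and latency values are made-up examples, and the fragment
 * assumes SDHCI_POWER_POLICY_NUM is 2 (each latency property must carry
 * that many entries per CPU group):
 *
 *	qcom,pm-qos-irq-type = "affine_cores";
 *	qcom,pm-qos-irq-cpu = <0>;
 *	qcom,pm-qos-irq-latency = <70 70>;
 *	qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
 *	qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
 *	qcom,pm-qos-cmdq-latency-us = <70 70>, <70 70>;
 */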
1903
Asutosh Das1c43b132018-01-11 18:08:40 +05301904#ifdef CONFIG_NVMEM
1905/* Parse qfprom data for deciding on errata work-arounds */
1906static long qfprom_read(struct device *dev, const char *name)
1907{
1908 struct nvmem_cell *cell;
1909 ssize_t len = 0;
1910 u32 *buf, val = 0;
1911 long err = 0;
1912
1913 cell = nvmem_cell_get(dev, name);
1914 if (IS_ERR(cell)) {
1915 err = PTR_ERR(cell);
1916 dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
1917 /* If entry does not exist, then that is not an error */
1918 if (err == -ENOENT)
1919 err = 0;
1920 return err;
1921 }
1922
1923 buf = (u32 *)nvmem_cell_read(cell, &len);
1924 if (IS_ERR(buf) || !len) {
 1925		dev_err(dev, "Failed reading nvmem cell, err: %ld, bytes fetched: %zd\n",
 1926			IS_ERR(buf) ? PTR_ERR(buf) : 0L, len);
1927 if (!IS_ERR(buf)) {
1928 kfree(buf);
1929 err = -EINVAL;
1930 } else {
1931 err = PTR_ERR(buf);
1932 }
1933 } else {
Asutosh Dasb8614aa2018-01-31 15:44:15 +05301934 /*
1935 * 30 bits from bit offset 0 would be read.
1936 * We're interested in bits 28:29
1937 */
1938 val = (*buf >> 28) & 0x3;
Asutosh Das1c43b132018-01-11 18:08:40 +05301939 kfree(buf);
1940 }
1941
1942 nvmem_cell_put(cell);
1943 return err ? err : (long) val;
1944}
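
/*
 * Worked example of the bit extraction above: a raw fuse word of
 * 0x30000000 has bits 29:28 set, so (0x30000000 >> 28) & 0x3 yields 3,
 * while a word of 0x0 yields 0, which the callers treat as a
 * base-version SoC.
 */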
1945
1946/* Reads the SoC version */
1947static int sdhci_msm_get_socrev(struct device *dev,
1948 struct sdhci_msm_host *msm_host)
1949{
1950
1951 msm_host->soc_min_rev = qfprom_read(dev, "minor_rev");
1952
1953 if (msm_host->soc_min_rev < 0)
1954 dev_err(dev, "failed getting soc_min_rev, err : %d\n",
1955 msm_host->soc_min_rev);
1956 return msm_host->soc_min_rev;
1957}
1958#else
1959/* Reads the SoC version */
1960static int sdhci_msm_get_socrev(struct device *dev,
1961 struct sdhci_msm_host *msm_host)
1962{
1963 return 0;
1964}
1965#endif
1966
Asutosh Das0ef24812012-12-18 16:14:02 +05301967/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001968static
1969struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1970 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301971{
1972 struct sdhci_msm_pltfm_data *pdata = NULL;
1973 struct device_node *np = dev->of_node;
1974 u32 bus_width = 0;
1975 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301976 int clk_table_len;
1977 u32 *clk_table = NULL;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05301978 int ice_clk_table_len;
1979 u32 *ice_clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301980 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05301981 const char *lower_bus_speed = NULL;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05301982 int bus_clk_table_len;
1983 u32 *bus_clk_table = NULL;
Asutosh Das0ef24812012-12-18 16:14:02 +05301984
1985 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1986 if (!pdata) {
1987 dev_err(dev, "failed to allocate memory for platform data\n");
1988 goto out;
1989 }
1990
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301991 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
Bao D. Nguyen0f5ac952017-06-14 12:42:41 -07001992 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301993 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301994
Asutosh Das0ef24812012-12-18 16:14:02 +05301995 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1996 if (bus_width == 8)
1997 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1998 else if (bus_width == 4)
1999 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
2000 else {
2001 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
2002 pdata->mmc_bus_width = 0;
2003 }
2004
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002005 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05302006 &msm_host->mmc->clk_scaling.pltfm_freq_table,
2007 &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002008 pr_debug("%s: no clock scaling frequencies were supplied\n",
2009 dev_name(dev));
Veerabhadrarao Badigantie5bab462017-05-30 20:34:46 +05302010 else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
2011 !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
2012 dev_err(dev, "bad dts clock scaling frequencies\n");
Talel Shenhar7dc5f792015-05-18 12:12:48 +03002013
Sahitya Tummala08d3caf2015-07-23 13:05:54 +05302014 /*
2015 * Few hosts can support DDR52 mode at the same lower
2016 * system voltage corner as high-speed mode. In such cases,
2017 * it is always better to put it in DDR mode which will
2018 * improve the performance without any power impact.
2019 */
2020 if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
2021 &lower_bus_speed)) {
2022 if (!strcmp(lower_bus_speed, "DDR52"))
2023 msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
2024 MMC_SCALING_LOWER_DDR52_MODE;
2025 }
2026
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302027 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
2028 &clk_table, &clk_table_len, 0)) {
2029 dev_err(dev, "failed parsing supported clock rates\n");
2030 goto out;
2031 }
2032 if (!clk_table || !clk_table_len) {
2033 dev_err(dev, "Invalid clock table\n");
2034 goto out;
2035 }
2036 pdata->sup_clk_table = clk_table;
2037 pdata->sup_clk_cnt = clk_table_len;
2038
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05302039 if (!sdhci_msm_dt_get_array(dev, "qcom,bus-aggr-clk-rates",
2040 &bus_clk_table, &bus_clk_table_len, 0)) {
2041 if (bus_clk_table && bus_clk_table_len) {
2042 pdata->bus_clk_table = bus_clk_table;
2043 pdata->bus_clk_cnt = bus_clk_table_len;
2044 }
2045 }
2046
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302047 if (msm_host->ice.pdev) {
2048 if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
2049 &ice_clk_table, &ice_clk_table_len, 0)) {
2050 dev_err(dev, "failed parsing supported ice clock rates\n");
2051 goto out;
2052 }
2053 if (!ice_clk_table || !ice_clk_table_len) {
2054 dev_err(dev, "Invalid clock table\n");
2055 goto out;
2056 }
Sahitya Tummala073ca552015-08-06 13:59:37 +05302057 if (ice_clk_table_len != 2) {
2058 dev_err(dev, "Need max and min frequencies in the table\n");
2059 goto out;
2060 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302061 pdata->sup_ice_clk_table = ice_clk_table;
2062 pdata->sup_ice_clk_cnt = ice_clk_table_len;
Sahitya Tummala073ca552015-08-06 13:59:37 +05302063 pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
2064 pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
2065 dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
2066 pdata->ice_clk_max, pdata->ice_clk_min);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05302067 }
2068
Asutosh Das0ef24812012-12-18 16:14:02 +05302069 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
2070 sdhci_msm_slot_reg_data),
2071 GFP_KERNEL);
2072 if (!pdata->vreg_data) {
2073 dev_err(dev, "failed to allocate memory for vreg data\n");
2074 goto out;
2075 }
2076
2077 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
2078 "vdd")) {
2079 dev_err(dev, "failed parsing vdd data\n");
2080 goto out;
2081 }
2082 if (sdhci_msm_dt_parse_vreg_info(dev,
2083 &pdata->vreg_data->vdd_io_data,
2084 "vdd-io")) {
2085 dev_err(dev, "failed parsing vdd-io data\n");
2086 goto out;
2087 }
2088
2089 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
2090 dev_err(dev, "failed parsing gpio data\n");
2091 goto out;
2092 }
2093
Asutosh Das0ef24812012-12-18 16:14:02 +05302094 len = of_property_count_strings(np, "qcom,bus-speed-mode");
2095
2096 for (i = 0; i < len; i++) {
2097 const char *name = NULL;
2098
2099 of_property_read_string_index(np,
2100 "qcom,bus-speed-mode", i, &name);
2101 if (!name)
2102 continue;
2103
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07002104 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
2105 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
2106 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
2107 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
2108 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05302109 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2110 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
2111 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2112 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
2113 pdata->caps |= MMC_CAP_1_8V_DDR
2114 | MMC_CAP_UHS_DDR50;
2115 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
2116 pdata->caps |= MMC_CAP_1_2V_DDR
2117 | MMC_CAP_UHS_DDR50;
2118 }
2119
2120 if (of_get_property(np, "qcom,nonremovable", NULL))
2121 pdata->nonremovable = true;
2122
Guoping Yuf7c91332014-08-20 16:56:18 +08002123 if (of_get_property(np, "qcom,nonhotplug", NULL))
2124 pdata->nonhotplug = true;
2125
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08002126 pdata->largeaddressbus =
2127 of_property_read_bool(np, "qcom,large-address-bus");
2128
Dov Levenglickc9033ab2015-03-10 16:00:56 +02002129 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
2130 msm_host->mmc->wakeup_on_idle = true;
2131
Gilad Bronerc788a672015-09-08 15:39:11 +03002132 sdhci_msm_pm_qos_parse(dev, pdata);
2133
Pavan Anamula5a256df2015-10-16 14:38:28 +05302134 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302135 msm_host->core_3_0v_support = true;
Pavan Anamula5a256df2015-10-16 14:38:28 +05302136
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002137 pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07002138 msm_host->regs_restore.is_supported =
2139 of_property_read_bool(np, "qcom,restore-after-cx-collapse");
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07002140
Vijay Viswanatha5492612017-10-17 15:38:55 +05302141 if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
2142 pdata->rclk_wa = true;
2143
Asutosh Das1c43b132018-01-11 18:08:40 +05302144 /*
2145 * rclk_wa is not required if soc version is mentioned and
2146 * is not base version.
2147 */
2148 if (msm_host->soc_min_rev != 0)
2149 pdata->rclk_wa = false;
2150
Asutosh Das0ef24812012-12-18 16:14:02 +05302151 return pdata;
2152out:
2153 return NULL;
2154}
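
/*
 * Illustrative device-tree fragment for some of the properties consumed
 * by sdhci_msm_populate_pdata() above. The GPIO phandle and the clock
 * rates are made-up examples, not taken from any particular board DT:
 *
 *	cd-gpios = <&tlmm 95 GPIO_ACTIVE_LOW>;
 *	qcom,bus-width = <4>;
 *	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
 *	qcom,ice-clk-rates = <300000000 75000000>;    (<max min>)
 *	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 *	qcom,nonremovable;
 */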
2155
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302156/* Returns required bandwidth in Bytes per Sec */
2157static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
2158 struct mmc_ios *ios)
2159{
Sahitya Tummala2886c922013-04-03 18:03:31 +05302160 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2161 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2162
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302163 unsigned int bw;
2164
Sahitya Tummala2886c922013-04-03 18:03:31 +05302165 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302166 /*
 2167	 * For DDR mode, the SDCC controller clock will be at
 2168	 * double the rate of the actual clock that goes to the card.
2169 */
2170 if (ios->bus_width == MMC_BUS_WIDTH_4)
2171 bw /= 2;
2172 else if (ios->bus_width == MMC_BUS_WIDTH_1)
2173 bw /= 8;
2174
2175 return bw;
2176}
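
/*
 * Worked example for sdhci_get_bw_required(): with the controller
 * clocked at 200 MHz (clk_rate = 200000000), a 4-bit bus requests
 * 100000000 bytes/sec, a 1-bit bus requests 25000000 bytes/sec, and an
 * 8-bit bus requests the full 200000000 bytes/sec.
 */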
2177
2178static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
2179 unsigned int bw)
2180{
2181 unsigned int *table = host->pdata->voting_data->bw_vecs;
2182 unsigned int size = host->pdata->voting_data->bw_vecs_size;
2183 int i;
2184
2185 if (host->msm_bus_vote.is_max_bw_needed && bw)
2186 return host->msm_bus_vote.max_bw_vote;
2187
2188 for (i = 0; i < size; i++) {
2189 if (bw <= table[i])
2190 break;
2191 }
2192
2193 if (i && (i == size))
2194 i--;
2195
2196 return i;
2197}
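
/*
 * Worked example for sdhci_msm_bus_get_vote_for_bw(), assuming a made-up
 * qcom,bus-bw-vectors-bps table of {0, 400000, 50000000, 100000000,
 * 200000000}: a request of 100000000 bytes/sec returns vote index 3 (the
 * first entry that is >= the request), and a request larger than every
 * entry returns the last index, 4.
 */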
2198
2199/*
2200 * This function must be called with host lock acquired.
2201 * Caller of this function should also ensure that msm bus client
2202 * handle is not null.
2203 */
2204static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
2205 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302206 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302207{
2208 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
2209 int rc = 0;
2210
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302211 BUG_ON(!flags);
2212
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302213 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302214 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302215 rc = msm_bus_scale_client_update_request(
2216 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302217 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302218 if (rc) {
2219 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
2220 mmc_hostname(host->mmc),
2221 msm_host->msm_bus_vote.client_handle, vote, rc);
2222 goto out;
2223 }
2224 msm_host->msm_bus_vote.curr_vote = vote;
2225 }
2226out:
2227 return rc;
2228}
2229
2230/*
2231 * Internal work. Work to set 0 bandwidth for msm bus.
2232 */
2233static void sdhci_msm_bus_work(struct work_struct *work)
2234{
2235 struct sdhci_msm_host *msm_host;
2236 struct sdhci_host *host;
2237 unsigned long flags;
2238
2239 msm_host = container_of(work, struct sdhci_msm_host,
2240 msm_bus_vote.vote_work.work);
2241 host = platform_get_drvdata(msm_host->pdev);
2242
2243 if (!msm_host->msm_bus_vote.client_handle)
2244 return;
2245
2246 spin_lock_irqsave(&host->lock, flags);
2247 /* don't vote for 0 bandwidth if any request is in progress */
2248 if (!host->mrq) {
2249 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302250 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302251 } else
 2252		pr_warn("%s: %s: Transfer in progress, skipping bus voting to 0 bandwidth\n",
2253 mmc_hostname(host->mmc), __func__);
2254 spin_unlock_irqrestore(&host->lock, flags);
2255}
2256
2257/*
2258 * This function cancels any scheduled delayed work and sets the bus
2259 * vote based on bw (bandwidth) argument.
2260 */
2261static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
2262 unsigned int bw)
2263{
2264 int vote;
2265 unsigned long flags;
2266 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2267 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2268
2269 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
2270 spin_lock_irqsave(&host->lock, flags);
2271 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05302272 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302273 spin_unlock_irqrestore(&host->lock, flags);
2274}
2275
2276#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
2277
 2278/* This function queues work which will set the bandwidth requirement to 0 */
2279static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
2280{
2281 unsigned long flags;
2282 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2283 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2284
2285 spin_lock_irqsave(&host->lock, flags);
2286 if (msm_host->msm_bus_vote.min_bw_vote !=
2287 msm_host->msm_bus_vote.curr_vote)
2288 queue_delayed_work(system_wq,
2289 &msm_host->msm_bus_vote.vote_work,
2290 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
2291 spin_unlock_irqrestore(&host->lock, flags);
2292}
2293
2294static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
2295 struct platform_device *pdev)
2296{
2297 int rc = 0;
2298 struct msm_bus_scale_pdata *bus_pdata;
2299
2300 struct sdhci_msm_bus_voting_data *data;
2301 struct device *dev = &pdev->dev;
2302
2303 data = devm_kzalloc(dev,
2304 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
2305 if (!data) {
2306 dev_err(&pdev->dev,
2307 "%s: failed to allocate memory\n", __func__);
2308 rc = -ENOMEM;
2309 goto out;
2310 }
2311 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
2312 if (data->bus_pdata) {
2313 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
2314 &data->bw_vecs, &data->bw_vecs_size, 0);
2315 if (rc) {
2316 dev_err(&pdev->dev,
2317 "%s: Failed to get bus-bw-vectors-bps\n",
2318 __func__);
2319 goto out;
2320 }
2321 host->pdata->voting_data = data;
2322 }
2323 if (host->pdata->voting_data &&
2324 host->pdata->voting_data->bus_pdata &&
2325 host->pdata->voting_data->bw_vecs &&
2326 host->pdata->voting_data->bw_vecs_size) {
2327
2328 bus_pdata = host->pdata->voting_data->bus_pdata;
2329 host->msm_bus_vote.client_handle =
2330 msm_bus_scale_register_client(bus_pdata);
2331 if (!host->msm_bus_vote.client_handle) {
 2332			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
2333 rc = -EFAULT;
2334 goto out;
2335 }
2336 /* cache the vote index for minimum and maximum bandwidth */
2337 host->msm_bus_vote.min_bw_vote =
2338 sdhci_msm_bus_get_vote_for_bw(host, 0);
2339 host->msm_bus_vote.max_bw_vote =
2340 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
2341 } else {
2342 devm_kfree(dev, data);
2343 }
2344
2345out:
2346 return rc;
2347}
2348
2349static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
2350{
2351 if (host->msm_bus_vote.client_handle)
2352 msm_bus_scale_unregister_client(
2353 host->msm_bus_vote.client_handle);
2354}
2355
2356static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
2357{
2358 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2359 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2360 struct mmc_ios *ios = &host->mmc->ios;
2361 unsigned int bw;
2362
2363 if (!msm_host->msm_bus_vote.client_handle)
2364 return;
2365
2366 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302367 if (enable) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302368 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05302369 } else {
2370 /*
2371 * If clock gating is enabled, then remove the vote
2372 * immediately because clocks will be disabled only
2373 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
2374 * additional delay is required to remove the bus vote.
2375 */
2376#ifdef CONFIG_MMC_CLKGATE
2377 if (host->mmc->clkgate_delay)
2378 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2379 else
2380#endif
2381 sdhci_msm_bus_queue_work(host);
2382 }
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302383}
2384
Asutosh Das0ef24812012-12-18 16:14:02 +05302385/* Regulator utility functions */
2386static int sdhci_msm_vreg_init_reg(struct device *dev,
2387 struct sdhci_msm_reg_data *vreg)
2388{
2389 int ret = 0;
2390
 2391	/* check if the regulator is already initialized */
2392 if (vreg->reg)
2393 goto out;
2394
2395 /* Get the regulator handle */
2396 vreg->reg = devm_regulator_get(dev, vreg->name);
2397 if (IS_ERR(vreg->reg)) {
2398 ret = PTR_ERR(vreg->reg);
2399 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
2400 __func__, vreg->name, ret);
2401 goto out;
2402 }
2403
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302404 if (regulator_count_voltages(vreg->reg) > 0) {
2405 vreg->set_voltage_sup = true;
2406 /* sanity check */
2407 if (!vreg->high_vol_level || !vreg->hpm_uA) {
2408 pr_err("%s: %s invalid constraints specified\n",
2409 __func__, vreg->name);
2410 ret = -EINVAL;
2411 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302412 }
2413
2414out:
2415 return ret;
2416}
2417
2418static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2419{
2420 if (vreg->reg)
2421 devm_regulator_put(vreg->reg);
2422}
2423
2424static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2425 *vreg, int uA_load)
2426{
2427 int ret = 0;
2428
2429 /*
2430 * regulators that do not support regulator_set_voltage also
 2431	 * do not support regulator_set_load
2432 */
2433 if (vreg->set_voltage_sup) {
2434 ret = regulator_set_load(vreg->reg, uA_load);
2435 if (ret < 0)
2436 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2437 __func__, vreg->name, uA_load, ret);
2438 else
2439 /*
2440 * regulator_set_load() can return non zero
 2441			 * regulator_set_load() can return a non-zero
 2442			 * value even in the success case.
2443 ret = 0;
2444 }
2445 return ret;
2446}
2447
2448static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2449 int min_uV, int max_uV)
2450{
2451 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302452 if (vreg->set_voltage_sup) {
2453 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2454 if (ret) {
 2455			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302456 __func__, vreg->name, min_uV, max_uV, ret);
2457 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302458 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302459
2460 return ret;
2461}
2462
2463static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2464{
2465 int ret = 0;
2466
2467 /* Put regulator in HPM (high power mode) */
2468 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2469 if (ret < 0)
2470 return ret;
2471
2472 if (!vreg->is_enabled) {
2473 /* Set voltage level */
2474 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2475 vreg->high_vol_level);
2476 if (ret)
2477 return ret;
2478 }
2479 ret = regulator_enable(vreg->reg);
2480 if (ret) {
2481 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2482 __func__, vreg->name, ret);
2483 return ret;
2484 }
2485 vreg->is_enabled = true;
2486 return ret;
2487}
2488
2489static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2490{
2491 int ret = 0;
2492
2493 /* Never disable regulator marked as always_on */
2494 if (vreg->is_enabled && !vreg->is_always_on) {
2495 ret = regulator_disable(vreg->reg);
2496 if (ret) {
2497 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2498 __func__, vreg->name, ret);
2499 goto out;
2500 }
2501 vreg->is_enabled = false;
2502
2503 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2504 if (ret < 0)
2505 goto out;
2506
2507 /* Set min. voltage level to 0 */
2508 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2509 if (ret)
2510 goto out;
2511 } else if (vreg->is_enabled && vreg->is_always_on) {
2512 if (vreg->lpm_sup) {
2513 /* Put always_on regulator in LPM (low power mode) */
2514 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2515 vreg->lpm_uA);
2516 if (ret < 0)
2517 goto out;
2518 }
2519 }
2520out:
2521 return ret;
2522}
2523
2524static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2525 bool enable, bool is_init)
2526{
2527 int ret = 0, i;
2528 struct sdhci_msm_slot_reg_data *curr_slot;
2529 struct sdhci_msm_reg_data *vreg_table[2];
2530
2531 curr_slot = pdata->vreg_data;
2532 if (!curr_slot) {
 2533		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
2534 __func__);
2535 goto out;
2536 }
2537
2538 vreg_table[0] = curr_slot->vdd_data;
2539 vreg_table[1] = curr_slot->vdd_io_data;
2540
2541 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2542 if (vreg_table[i]) {
2543 if (enable)
2544 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2545 else
2546 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2547 if (ret)
2548 goto out;
2549 }
2550 }
2551out:
2552 return ret;
2553}
2554
Asutosh Das0ef24812012-12-18 16:14:02 +05302555/* This init function should be called only once for each SDHC slot */
2556static int sdhci_msm_vreg_init(struct device *dev,
2557 struct sdhci_msm_pltfm_data *pdata,
2558 bool is_init)
2559{
2560 int ret = 0;
2561 struct sdhci_msm_slot_reg_data *curr_slot;
2562 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
2563
2564 curr_slot = pdata->vreg_data;
2565 if (!curr_slot)
2566 goto out;
2567
2568 curr_vdd_reg = curr_slot->vdd_data;
2569 curr_vdd_io_reg = curr_slot->vdd_io_data;
2570
2571 if (!is_init)
2572 /* Deregister all regulators from regulator framework */
2573 goto vdd_io_reg_deinit;
2574
2575 /*
2576 * Get the regulator handle from voltage regulator framework
2577 * and then try to set the voltage level for the regulator
2578 */
2579 if (curr_vdd_reg) {
2580 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
2581 if (ret)
2582 goto out;
2583 }
2584 if (curr_vdd_io_reg) {
2585 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
2586 if (ret)
2587 goto vdd_reg_deinit;
2588 }
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302589
Asutosh Das0ef24812012-12-18 16:14:02 +05302590 if (ret)
2591 dev_err(dev, "vreg reset failed (%d)\n", ret);
2592 goto out;
2593
2594vdd_io_reg_deinit:
2595 if (curr_vdd_io_reg)
2596 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
2597vdd_reg_deinit:
2598 if (curr_vdd_reg)
2599 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
2600out:
2601 return ret;
2602}
2603
2604
2605static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2606 enum vdd_io_level level,
2607 unsigned int voltage_level)
2608{
2609 int ret = 0;
2610 int set_level;
2611 struct sdhci_msm_reg_data *vdd_io_reg;
2612
2613 if (!pdata->vreg_data)
2614 return ret;
2615
2616 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2617 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2618 switch (level) {
2619 case VDD_IO_LOW:
2620 set_level = vdd_io_reg->low_vol_level;
2621 break;
2622 case VDD_IO_HIGH:
2623 set_level = vdd_io_reg->high_vol_level;
2624 break;
2625 case VDD_IO_SET_LEVEL:
2626 set_level = voltage_level;
2627 break;
2628 default:
2629 pr_err("%s: invalid argument level = %d",
2630 __func__, level);
2631 ret = -EINVAL;
2632 return ret;
2633 }
2634 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2635 set_level);
2636 }
2637 return ret;
2638}
2639
Ritesh Harjani42876f42015-11-17 17:46:51 +05302640/*
2641 * Acquire spin-lock host->lock before calling this function
2642 */
2643static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
2644 bool enable)
2645{
2646 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2647 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2648
2649 if (enable && !msm_host->is_sdiowakeup_enabled)
2650 enable_irq(msm_host->pdata->sdiowakeup_irq);
2651 else if (!enable && msm_host->is_sdiowakeup_enabled)
2652 disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
2653 else
2654 dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
2655 __func__, enable, msm_host->is_sdiowakeup_enabled);
2656 msm_host->is_sdiowakeup_enabled = enable;
2657}
2658
2659static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
2660{
2661 struct sdhci_host *host = (struct sdhci_host *)data;
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302662 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2663 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2664
Ritesh Harjani42876f42015-11-17 17:46:51 +05302665 unsigned long flags;
2666
2667 pr_debug("%s: irq (%d) received\n", __func__, irq);
2668
2669 spin_lock_irqsave(&host->lock, flags);
2670 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
2671 spin_unlock_irqrestore(&host->lock, flags);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05302672 msm_host->sdio_pending_processing = true;
Ritesh Harjani42876f42015-11-17 17:46:51 +05302673
2674 return IRQ_HANDLED;
2675}
2676
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302677void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2678{
2679 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2680 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302681 const struct sdhci_msm_offset *msm_host_offset =
2682 msm_host->offset;
Siba Prasad0196fe42017-06-27 15:13:27 +05302683 unsigned int irq_flags = 0;
2684 struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302685
Siba Prasad0196fe42017-06-27 15:13:27 +05302686 if (pwr_irq_desc)
2687 irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
2688 state_use_accessors);
2689
2690 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302691 mmc_hostname(host->mmc),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302692 sdhci_msm_readl_relaxed(host,
2693 msm_host_offset->CORE_PWRCTL_STATUS),
2694 sdhci_msm_readl_relaxed(host,
2695 msm_host_offset->CORE_PWRCTL_MASK),
2696 sdhci_msm_readl_relaxed(host,
Siba Prasad0196fe42017-06-27 15:13:27 +05302697 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
2698
2699 MMC_TRACE(host->mmc,
2700 "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
2701 __func__,
2702 sdhci_msm_readb_relaxed(host,
2703 msm_host_offset->CORE_PWRCTL_STATUS),
2704 sdhci_msm_readb_relaxed(host,
2705 msm_host_offset->CORE_PWRCTL_MASK),
2706 sdhci_msm_readb_relaxed(host,
2707 msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302708}
2709
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002710static int sdhci_msm_clear_pwrctl_status(struct sdhci_host *host, u8 value)
2711{
2712 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2713 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2714 const struct sdhci_msm_offset *msm_host_offset = msm_host->offset;
2715 int ret = 0, retry = 10;
2716
2717 /*
2718 * There is a rare HW scenario where the first clear pulse could be
 2719	 * lost when the actual reset and the clear/read of the status register
 2720	 * happen at the same time. Hence, retry at least 10 times to make
2721 * sure status register is cleared. Otherwise, this will result in
2722 * a spurious power IRQ resulting in system instability.
2723 */
2724 do {
2725 if (retry == 0) {
 2726			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
2727 mmc_hostname(host->mmc), value);
2728 sdhci_msm_dump_pwr_ctrl_regs(host);
2729 WARN_ON(1);
2730 ret = -EBUSY;
2731 break;
2732 }
2733
2734 /*
2735 * Clear the PWRCTL_STATUS interrupt bits by writing to the
2736 * corresponding bits in the PWRCTL_CLEAR register.
2737 */
2738 sdhci_msm_writeb_relaxed(value, host,
2739 msm_host_offset->CORE_PWRCTL_CLEAR);
2740 /*
2741 * SDHC has core_mem and hc_mem device memory and these memory
2742 * addresses do not fall within 1KB region. Hence, any update
2743 * to core_mem address space would require an mb() to ensure
2744 * this gets completed before its next update to registers
2745 * within hc_mem.
2746 */
2747 mb();
2748 retry--;
2749 udelay(10);
2750 } while (value & sdhci_msm_readb_relaxed(host,
2751 msm_host_offset->CORE_PWRCTL_STATUS));
2752
2753 return ret;
2754}
2755
Asutosh Das0ef24812012-12-18 16:14:02 +05302756static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2757{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002758 struct sdhci_host *host = (struct sdhci_host *)data;
2759 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2760 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302761 const struct sdhci_msm_offset *msm_host_offset =
2762 msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05302763 u8 irq_status = 0;
2764 u8 irq_ack = 0;
2765 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302766 int pwr_state = 0, io_level = 0;
2767 unsigned long flags;
Asutosh Das0ef24812012-12-18 16:14:02 +05302768
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302769 irq_status = sdhci_msm_readb_relaxed(host,
2770 msm_host_offset->CORE_PWRCTL_STATUS);
2771
Asutosh Das0ef24812012-12-18 16:14:02 +05302772 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2773 mmc_hostname(msm_host->mmc), irq, irq_status);
2774
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08002775 sdhci_msm_clear_pwrctl_status(host, irq_status);
Asutosh Das0ef24812012-12-18 16:14:02 +05302776
 2777	/* Handle BUS ON/OFF */
2778 if (irq_status & CORE_PWRCTL_BUS_ON) {
2779 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302780 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302781 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302782 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2783 VDD_IO_HIGH, 0);
2784 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302785 if (ret)
2786 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2787 else
2788 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302789
2790 pwr_state = REQ_BUS_ON;
2791 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302792 }
2793 if (irq_status & CORE_PWRCTL_BUS_OFF) {
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05302794 if (msm_host->pltfm_init_done)
2795 ret = sdhci_msm_setup_vreg(msm_host->pdata,
2796 false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302797 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302798 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302799 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2800 VDD_IO_LOW, 0);
2801 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302802 if (ret)
2803 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2804 else
2805 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302806
2807 pwr_state = REQ_BUS_OFF;
2808 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302809 }
2810 /* Handle IO LOW/HIGH */
2811 if (irq_status & CORE_PWRCTL_IO_LOW) {
2812 /* Switch voltage Low */
2813 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2814 if (ret)
2815 irq_ack |= CORE_PWRCTL_IO_FAIL;
2816 else
2817 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302818
2819 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302820 }
2821 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2822 /* Switch voltage High */
2823 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2824 if (ret)
2825 irq_ack |= CORE_PWRCTL_IO_FAIL;
2826 else
2827 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302828
2829 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302830 }
2831
2832 /* ACK status to the core */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302833 sdhci_msm_writeb_relaxed(irq_ack, host,
2834 msm_host_offset->CORE_PWRCTL_CTL);
Asutosh Das0ef24812012-12-18 16:14:02 +05302835 /*
2836 * SDHC has core_mem and hc_mem device memory and these memory
2837 * addresses do not fall within 1KB region. Hence, any update to
2838 * core_mem address space would require an mb() to ensure this gets
2839 * completed before its next update to registers within hc_mem.
2840 */
2841 mb();
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05302842 if ((io_level & REQ_IO_HIGH) &&
2843 (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
2844 !msm_host->core_3_0v_support)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302845 writel_relaxed((readl_relaxed(host->ioaddr +
2846 msm_host_offset->CORE_VENDOR_SPEC) &
2847 ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2848 msm_host_offset->CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002849 else if ((io_level & REQ_IO_LOW) ||
2850 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302851 writel_relaxed((readl_relaxed(host->ioaddr +
2852 msm_host_offset->CORE_VENDOR_SPEC) |
2853 CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
2854 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002855 mb();
2856
Asutosh Das0ef24812012-12-18 16:14:02 +05302857 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2858 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302859 spin_lock_irqsave(&host->lock, flags);
2860 if (pwr_state)
2861 msm_host->curr_pwr_state = pwr_state;
2862 if (io_level)
2863 msm_host->curr_io_level = io_level;
2864 complete(&msm_host->pwr_irq_completion);
2865 spin_unlock_irqrestore(&host->lock, flags);
2866
Asutosh Das0ef24812012-12-18 16:14:02 +05302867 return IRQ_HANDLED;
2868}
2869
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302870static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302871show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2872{
2873 struct sdhci_host *host = dev_get_drvdata(dev);
2874 int poll;
2875 unsigned long flags;
2876
2877 spin_lock_irqsave(&host->lock, flags);
2878 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2879 spin_unlock_irqrestore(&host->lock, flags);
2880
2881 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2882}
2883
2884static ssize_t
2885store_polling(struct device *dev, struct device_attribute *attr,
2886 const char *buf, size_t count)
2887{
2888 struct sdhci_host *host = dev_get_drvdata(dev);
2889 int value;
2890 unsigned long flags;
2891
2892 if (!kstrtou32(buf, 0, &value)) {
2893 spin_lock_irqsave(&host->lock, flags);
2894 if (value) {
2895 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2896 mmc_detect_change(host->mmc, 0);
2897 } else {
2898 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2899 }
2900 spin_unlock_irqrestore(&host->lock, flags);
2901 }
2902 return count;
2903}
2904
2905static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302906show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2907 char *buf)
2908{
2909 struct sdhci_host *host = dev_get_drvdata(dev);
2910 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2911 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2912
2913 return snprintf(buf, PAGE_SIZE, "%u\n",
2914 msm_host->msm_bus_vote.is_max_bw_needed);
2915}
2916
2917static ssize_t
2918store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2919 const char *buf, size_t count)
2920{
2921 struct sdhci_host *host = dev_get_drvdata(dev);
2922 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2923 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2924 uint32_t value;
2925 unsigned long flags;
2926
2927 if (!kstrtou32(buf, 0, &value)) {
2928 spin_lock_irqsave(&host->lock, flags);
2929 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2930 spin_unlock_irqrestore(&host->lock, flags);
2931 }
2932 return count;
2933}
2934
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302935static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302936{
2937 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2938 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302939 const struct sdhci_msm_offset *msm_host_offset =
2940 msm_host->offset;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302941 unsigned long flags;
2942 bool done = false;
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302943 u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
Asutosh Das0ef24812012-12-18 16:14:02 +05302944
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302945 spin_lock_irqsave(&host->lock, flags);
2946 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2947 mmc_hostname(host->mmc), __func__, req_type,
2948 msm_host->curr_pwr_state, msm_host->curr_io_level);
Veerabhadrarao Badiganti88fdead2017-03-27 22:46:20 +05302949 if (!msm_host->mci_removed)
2950 io_sig_sts = sdhci_msm_readl_relaxed(host,
2951 msm_host_offset->CORE_GENERICS);
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05302952
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302953 /*
2954 * The IRQ for request type IO High/Low will be generated when -
2955 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2956 * 2. If 1 is true, and there is a state change in the 1.8V enable
2957 * bit (bit 3) of the SDHCI_HOST_CONTROL2 register. The reset state of
2958 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2959 * layer tries to set it to 3.3V before card detection happens, the
2960 * IRQ doesn't get triggered as there is no state change in this bit.
2961 * The driver already handles this case by changing the IO voltage
2962 * level to high as part of controller power up sequence. Hence, check
2963 * for host->pwr to handle a case where IO voltage high request is
2964 * issued even before controller power up.
2965 */
2966 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2967 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2968 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2969 pr_debug("%s: do not wait for power IRQ that never comes\n",
2970 mmc_hostname(host->mmc));
2971 spin_unlock_irqrestore(&host->lock, flags);
2972 return;
2973 }
2974 }
2975
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302976 if ((req_type & msm_host->curr_pwr_state) ||
2977 (req_type & msm_host->curr_io_level))
2978 done = true;
2979 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302980
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302981 /*
2982 * This is needed here to handle a case where IRQ gets
2983 * triggered even before this function is called so that
2984 * x->done counter of completion gets reset. Otherwise,
2985 * next call to wait_for_completion returns immediately
2986 * without actually waiting for the IRQ to be handled.
2987 */
2988 if (done)
2989 init_completion(&msm_host->pwr_irq_completion);
Ritesh Harjani82124772014-11-04 15:34:00 +05302990 else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
Siba Prasad0196fe42017-06-27 15:13:27 +05302991 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
Ritesh Harjani82124772014-11-04 15:34:00 +05302992 __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
2993 mmc_hostname(host->mmc), req_type);
Siba Prasad0196fe42017-06-27 15:13:27 +05302994 MMC_TRACE(host->mmc,
2995 "%s: request(%d) timed out waiting for pwr_irq\n",
2996 __func__, req_type);
2997 sdhci_msm_dump_pwr_ctrl_regs(host);
2998 }
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302999 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
3000 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05303001}
3002
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003003static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
3004{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303005 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3006 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3007 const struct sdhci_msm_offset *msm_host_offset =
3008 msm_host->offset;
3009 u32 config = readl_relaxed(host->ioaddr +
3010 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303011
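/*
 * CORE_CDR_EN and CORE_CDR_EXT_EN are programmed mutually exclusively in
 * DLL_CONFIG: enabling sets CORE_CDR_EN and clears CORE_CDR_EXT_EN, while
 * disabling does the reverse.
 */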
3012 if (enable) {
3013 config |= CORE_CDR_EN;
3014 config &= ~CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303015 writel_relaxed(config, host->ioaddr +
3016 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303017 } else {
3018 config &= ~CORE_CDR_EN;
3019 config |= CORE_CDR_EXT_EN;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303020 writel_relaxed(config, host->ioaddr +
3021 msm_host_offset->CORE_DLL_CONFIG);
Ritesh Harjani8e36f662014-11-14 11:09:56 +05303022 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07003023}
3024
Asutosh Das648f9d12013-01-10 21:11:04 +05303025static unsigned int sdhci_msm_max_segs(void)
3026{
3027 return SDHCI_MSM_MAX_SEGMENTS;
3028}
3029
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303030static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303031{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303032 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3033 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303034
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303035 return msm_host->pdata->sup_clk_table[0];
3036}
3037
3038static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
3039{
3040 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3041 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3042 int max_clk_index = msm_host->pdata->sup_clk_cnt;
3043
3044 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
3045}
3046
3047static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
3048 u32 req_clk)
3049{
3050 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3051 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3052 unsigned int sel_clk = -1;
3053 unsigned char cnt;
3054
3055 if (req_clk < sdhci_msm_get_min_clock(host)) {
3056 sel_clk = sdhci_msm_get_min_clock(host);
3057 return sel_clk;
3058 }
3059
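/*
 * Scan the supported clock table (expected to be sorted in ascending
 * order) and pick the highest rate that does not exceed req_clk; an
 * exact match is returned immediately.
 */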
3060 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
3061 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
3062 break;
3063 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
3064 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3065 break;
3066 } else {
3067 sel_clk = msm_host->pdata->sup_clk_table[cnt];
3068 }
3069 }
3070 return sel_clk;
3071}
3072
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303073static long sdhci_msm_get_bus_aggr_clk_rate(struct sdhci_host *host,
3074 u32 apps_clk)
3075{
3076 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3077 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3078 long sel_clk = -1;
3079 unsigned char cnt;
3080
3081 if (msm_host->pdata->bus_clk_cnt != msm_host->pdata->sup_clk_cnt) {
3082 pr_err("%s: %s: mismatch between bus_clk_cnt(%u) and apps_clk_cnt(%u)\n",
3083 mmc_hostname(host->mmc), __func__,
3084 (unsigned int)msm_host->pdata->bus_clk_cnt,
3085 (unsigned int)msm_host->pdata->sup_clk_cnt);
3086 return msm_host->pdata->bus_clk_table[0];
3087 }
3088 if (apps_clk == sdhci_msm_get_min_clock(host)) {
3089 sel_clk = msm_host->pdata->bus_clk_table[0];
3090 return sel_clk;
3091 }
3092
3093 for (cnt = 0; cnt < msm_host->pdata->bus_clk_cnt; cnt++) {
3094 if (msm_host->pdata->sup_clk_table[cnt] > apps_clk)
3095 break;
3096 sel_clk = msm_host->pdata->bus_clk_table[cnt];
3097 }
3098 return sel_clk;
3099}
3100
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003101static void sdhci_msm_registers_save(struct sdhci_host *host)
3102{
3103 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3104 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3105 const struct sdhci_msm_offset *msm_host_offset =
3106 msm_host->offset;
3107
3108 if (!msm_host->regs_restore.is_supported)
3109 return;
3110
3111 msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
3112 msm_host_offset->CORE_VENDOR_SPEC);
3113 msm_host->regs_restore.vendor_pwrctl_mask =
3114 readl_relaxed(host->ioaddr +
3115 msm_host_offset->CORE_PWRCTL_MASK);
3116 msm_host->regs_restore.vendor_func2 =
3117 readl_relaxed(host->ioaddr +
3118 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3119 msm_host->regs_restore.vendor_func3 =
3120 readl_relaxed(host->ioaddr +
3121 msm_host_offset->CORE_VENDOR_SPEC3);
3122 msm_host->regs_restore.hc_2c_2e =
3123 sdhci_readl(host, SDHCI_CLOCK_CONTROL);
3124 msm_host->regs_restore.hc_3c_3e =
3125 sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
3126 msm_host->regs_restore.vendor_pwrctl_ctl =
3127 readl_relaxed(host->ioaddr +
3128 msm_host_offset->CORE_PWRCTL_CTL);
3129 msm_host->regs_restore.hc_38_3a =
3130 sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
3131 msm_host->regs_restore.hc_34_36 =
3132 sdhci_readl(host, SDHCI_INT_ENABLE);
3133 msm_host->regs_restore.hc_28_2a =
3134 sdhci_readl(host, SDHCI_HOST_CONTROL);
3135 msm_host->regs_restore.vendor_caps_0 =
3136 readl_relaxed(host->ioaddr +
3137 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3138 msm_host->regs_restore.hc_caps_1 =
3139 sdhci_readl(host, SDHCI_CAPABILITIES_1);
3140 msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
3141 msm_host_offset->CORE_TESTBUS_CONFIG);
3142 msm_host->regs_restore.is_valid = true;
3143
3144 pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
3145 mmc_hostname(host->mmc), __func__,
3146 readl_relaxed(host->ioaddr +
3147 msm_host_offset->CORE_PWRCTL_MASK));
3148}
3149
3150static void sdhci_msm_registers_restore(struct sdhci_host *host)
3151{
3152 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3153 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003154 u8 irq_status;
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003155 const struct sdhci_msm_offset *msm_host_offset =
3156 msm_host->offset;
3157
3158 if (!msm_host->regs_restore.is_supported ||
3159 !msm_host->regs_restore.is_valid)
3160 return;
3161
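/*
 * Mask all power IRQs while the registers are being restored; the saved
 * PWRCTL mask is written back below, after the status register has been
 * cleared.
 */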
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003162 writel_relaxed(0, host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003163 writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
3164 msm_host_offset->CORE_VENDOR_SPEC);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003165 writel_relaxed(msm_host->regs_restore.vendor_func2,
3166 host->ioaddr +
3167 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
3168 writel_relaxed(msm_host->regs_restore.vendor_func3,
3169 host->ioaddr +
3170 msm_host_offset->CORE_VENDOR_SPEC3);
3171 sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
3172 SDHCI_CLOCK_CONTROL);
3173 sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
3174 SDHCI_AUTO_CMD_ERR);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003175 sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
3176 SDHCI_SIGNAL_ENABLE);
3177 sdhci_writel(host, msm_host->regs_restore.hc_34_36,
3178 SDHCI_INT_ENABLE);
3179 sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
3180 SDHCI_HOST_CONTROL);
3181 writel_relaxed(msm_host->regs_restore.vendor_caps_0,
3182 host->ioaddr +
3183 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
3184 sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
3185 SDHCI_CAPABILITIES_1);
3186 writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
3187 msm_host_offset->CORE_TESTBUS_CONFIG);
3188 msm_host->regs_restore.is_valid = false;
3189
Bao D. Nguyencb045dd2018-02-14 15:41:45 -08003190 /*
3191 * Clear the PWRCTL_STATUS register.
3192 * There is a rare HW scenario where the first clear pulse could be
3193 * lost when the actual reset and the clear/read of the status register
3194 * happen at the same time. Hence, retry at least 10 times to make
3195 * sure the status register is cleared. Otherwise, this will result in
3196 * a spurious power IRQ resulting in system instability.
3197 */
3198 irq_status = sdhci_msm_readb_relaxed(host,
3199 msm_host_offset->CORE_PWRCTL_STATUS);
3200
3201 sdhci_msm_clear_pwrctl_status(host, irq_status);
3202
3203 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
3204 host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
3205 writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
3206 host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
3207
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003208 pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
3209 mmc_hostname(host->mmc), __func__,
3210 readl_relaxed(host->ioaddr +
3211 msm_host_offset->CORE_PWRCTL_MASK));
3212}
3213
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303214static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
3215{
3216 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3217 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3218 int rc = 0;
3219
3220 if (atomic_read(&msm_host->controller_clock))
3221 return 0;
3222
3223 sdhci_msm_bus_voting(host, 1);
3224
3225 if (!IS_ERR(msm_host->pclk)) {
3226 rc = clk_prepare_enable(msm_host->pclk);
3227 if (rc) {
3228 pr_err("%s: %s: failed to enable the pclk with error %d\n",
3229 mmc_hostname(host->mmc), __func__, rc);
3230 goto remove_vote;
3231 }
3232 }
3233
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303234 if (!IS_ERR(msm_host->bus_aggr_clk)) {
3235 rc = clk_prepare_enable(msm_host->bus_aggr_clk);
3236 if (rc) {
3237 pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
3238 mmc_hostname(host->mmc), __func__, rc);
3239 goto disable_pclk;
3240 }
3241 }
3242
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303243 rc = clk_prepare_enable(msm_host->clk);
3244 if (rc) {
3245 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
3246 mmc_hostname(host->mmc), __func__, rc);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303247 goto disable_bus_aggr_clk;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303248 }
3249
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303250 if (!IS_ERR(msm_host->ice_clk)) {
3251 rc = clk_prepare_enable(msm_host->ice_clk);
3252 if (rc) {
3253 pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
3254 mmc_hostname(host->mmc), __func__, rc);
3255 goto disable_host_clk;
3256 }
3257 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303258 atomic_set(&msm_host->controller_clock, 1);
3259 pr_debug("%s: %s: enabled controller clock\n",
3260 mmc_hostname(host->mmc), __func__);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003261 sdhci_msm_registers_restore(host);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303262 goto out;
3263
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303264disable_host_clk:
3265 if (!IS_ERR(msm_host->clk))
3266 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303267disable_bus_aggr_clk:
3268 if (!IS_ERR(msm_host->bus_aggr_clk))
3269 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303270disable_pclk:
3271 if (!IS_ERR(msm_host->pclk))
3272 clk_disable_unprepare(msm_host->pclk);
3273remove_vote:
3274 if (msm_host->msm_bus_vote.client_handle)
3275 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3276out:
3277 return rc;
3278}
3279
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303280static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
3281{
3282 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3283 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303284
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303285 if (atomic_read(&msm_host->controller_clock)) {
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003286 sdhci_msm_registers_save(host);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303287 if (!IS_ERR(msm_host->clk))
3288 clk_disable_unprepare(msm_host->clk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303289 if (!IS_ERR(msm_host->ice_clk))
3290 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303291 if (!IS_ERR(msm_host->bus_aggr_clk))
3292 clk_disable_unprepare(msm_host->bus_aggr_clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303293 if (!IS_ERR(msm_host->pclk))
3294 clk_disable_unprepare(msm_host->pclk);
Sayali Lokhandeb30295162016-11-18 16:05:50 +05303295 sdhci_msm_bus_voting(host, 0);
3296 atomic_set(&msm_host->controller_clock, 0);
3297 pr_debug("%s: %s: disabled controller clock\n",
3298 mmc_hostname(host->mmc), __func__);
3299 }
3300}
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303301
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303302static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
3303{
3304 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3305 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3306 int rc = 0;
3307
3308 if (enable && !atomic_read(&msm_host->clks_on)) {
3309 pr_debug("%s: request to enable clocks\n",
3310 mmc_hostname(host->mmc));
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303311
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303312 /*
3313 * The bus-width or the clock rate might have changed
3314 * after the controller clocks were enabled; update the bus
3315 * vote in such a case.
3316 */
3317 if (atomic_read(&msm_host->controller_clock))
3318 sdhci_msm_bus_voting(host, 1);
3319
3320 rc = sdhci_msm_enable_controller_clock(host);
3321 if (rc)
3322 goto remove_vote;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303323
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303324 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3325 rc = clk_prepare_enable(msm_host->bus_clk);
3326 if (rc) {
3327 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
3328 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303329 goto disable_controller_clk;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303330 }
3331 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003332 if (!IS_ERR(msm_host->ff_clk)) {
3333 rc = clk_prepare_enable(msm_host->ff_clk);
3334 if (rc) {
3335 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
3336 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303337 goto disable_bus_clk;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003338 }
3339 }
3340 if (!IS_ERR(msm_host->sleep_clk)) {
3341 rc = clk_prepare_enable(msm_host->sleep_clk);
3342 if (rc) {
3343 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
3344 mmc_hostname(host->mmc), __func__, rc);
3345 goto disable_ff_clk;
3346 }
3347 }
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303348 mb();
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303349
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303350 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303351 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
3352 mb();
Sahitya Tummaladc182982013-08-20 15:32:09 +05303353 /*
3354 * During 1.8V signal switching the clock source must
3355 * still be ON as it requires accessing SDHC
3356 * registers (SDHCI host control2 register bit 3 must
3357 * be written and polled after stopping the SDCLK).
3358 */
3359 if (host->mmc->card_clock_off)
3360 return 0;
3361 pr_debug("%s: request to disable clocks\n",
3362 mmc_hostname(host->mmc));
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003363 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
3364 clk_disable_unprepare(msm_host->sleep_clk);
3365 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3366 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303367 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3368 clk_disable_unprepare(msm_host->bus_clk);
Bao D. Nguyen6fa49fe2017-06-29 20:38:49 -07003369 sdhci_msm_disable_controller_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303370 }
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303371 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303372 goto out;
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003373disable_ff_clk:
3374 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
3375 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303376disable_bus_clk:
3377 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
3378 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303379disable_controller_clk:
3380 if (!IS_ERR_OR_NULL(msm_host->clk))
3381 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303382 if (!IS_ERR(msm_host->ice_clk))
3383 clk_disable_unprepare(msm_host->ice_clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05303384 if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
3385 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303386 if (!IS_ERR_OR_NULL(msm_host->pclk))
3387 clk_disable_unprepare(msm_host->pclk);
3388 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303389remove_vote:
3390 if (msm_host->msm_bus_vote.client_handle)
3391 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303392out:
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303393 return rc;
3394}
3395
3396static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
3397{
3398 int rc;
3399 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3400 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303401 const struct sdhci_msm_offset *msm_host_offset =
3402 msm_host->offset;
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003403 struct mmc_card *card = host->mmc->card;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303404 struct mmc_ios curr_ios = host->mmc->ios;
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003405 u32 sup_clock, ddr_clock, dll_lock;
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303406 long bus_clk_rate;
Sahitya Tummala043744a2013-06-24 09:55:33 +05303407 bool curr_pwrsave;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303408
3409 if (!clock) {
Sujit Reddy Thummabf1aecc2014-01-10 10:58:54 +05303410 /*
3411 * disable pwrsave to ensure clock is not auto-gated until
3412 * the rate is >400KHz (initialization complete).
3413 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303414 writel_relaxed(readl_relaxed(host->ioaddr +
3415 msm_host_offset->CORE_VENDOR_SPEC) &
3416 ~CORE_CLK_PWRSAVE, host->ioaddr +
3417 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303418 sdhci_msm_prepare_clocks(host, false);
3419 host->clock = clock;
3420 goto out;
3421 }
3422
3423 rc = sdhci_msm_prepare_clocks(host, true);
3424 if (rc)
3425 goto out;
3426
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303427 curr_pwrsave = !!(readl_relaxed(host->ioaddr +
3428 msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
Sahitya Tummalae000b242013-08-29 16:21:08 +05303429 if ((clock > 400000) &&
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003430 !curr_pwrsave && card && mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303431 writel_relaxed(readl_relaxed(host->ioaddr +
3432 msm_host_offset->CORE_VENDOR_SPEC)
3433 | CORE_CLK_PWRSAVE, host->ioaddr +
3434 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303435 /*
3436 * Disable pwrsave for a newly added card if it doesn't allow clock
3437 * gating.
3438 */
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003439 else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303440 writel_relaxed(readl_relaxed(host->ioaddr +
3441 msm_host_offset->CORE_VENDOR_SPEC)
3442 & ~CORE_CLK_PWRSAVE, host->ioaddr +
3443 msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala043744a2013-06-24 09:55:33 +05303444
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303445 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003446 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003447 (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003448 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303449 /*
3450 * The SDHC requires internal clock frequency to be double the
3451 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003452 * uses the faster clock (100/400 MHz) for some of its parts and
3453 * sends the actual required clock (50/200 MHz) to the card.
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303454 */
3455 ddr_clock = clock * 2;
3456 sup_clock = sdhci_msm_get_sup_clk_rate(host,
3457 ddr_clock);
3458 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003459
3460 /*
3461 * In general all timing modes are controlled via UHS mode select in
3462 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
3463 * their respective modes defined here, hence we use these values.
3464 *
3465 * HS200 - SDR104 (Since they both are equivalent in functionality)
3466 * HS400 - This involves multiple configurations
3467 * Initially SDR104 - when tuning is required as HS200
3468 * Then when switching to DDR @ 400MHz (HS400) we use
3469 * the vendor specific HC_SELECT_IN to control the mode.
3470 *
3471 * In addition to controlling the modes we also need to select the
3472 * correct input clock for DLL depending on the mode.
3473 *
3474 * HS400 - divided clock (free running MCLK/2)
3475 * All other modes - default (free running MCLK)
3476 */
3477 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
3478 /* Select the divided clock (free running MCLK/2) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303479 writel_relaxed(((readl_relaxed(host->ioaddr +
3480 msm_host_offset->CORE_VENDOR_SPEC)
3481 & ~CORE_HC_MCLK_SEL_MASK)
3482 | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
3483 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003484 /*
3485 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
3486 * register
3487 */
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303488 if ((msm_host->tuning_done ||
Subhash Jadavanic1e97552016-06-17 18:44:14 -07003489 (card && mmc_card_strobe(card) &&
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303490 msm_host->enhanced_strobe)) &&
3491 !msm_host->calibration_done) {
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003492 /*
3493 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
3494 * field in VENDOR_SPEC_FUNC
3495 */
3496 writel_relaxed((readl_relaxed(host->ioaddr + \
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303497 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003498 | CORE_HC_SELECT_IN_HS400
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303499 | CORE_HC_SELECT_IN_EN), host->ioaddr +
3500 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003501 }
Ram Prakash Gupta20b8ca12018-04-16 11:17:22 +05303502 /*
3503 * After MCLK ungating, toggle the FIFO write clock to get
3504 * the FIFO pointers and flags to a valid state.
3505 */
3506 if (msm_host->tuning_done ||
3507 (card && mmc_card_strobe(card) &&
3508 msm_host->enhanced_strobe)) {
3509 /*
3510 * set HC_REG_DLL_CONFIG_3[1] to select MCLK as
3511 * DLL input clock
3512 */
3513 writel_relaxed(((readl_relaxed(host->ioaddr +
3514 msm_host_offset->CORE_DDR_CONFIG))
3515 | RCLK_TOGGLE), host->ioaddr +
3516 msm_host_offset->CORE_DDR_CONFIG);
3517 /* ensure the above write completes before toggling the same bit again */
3518 wmb();
3519 udelay(2);
3520 /*
3521 * clear HC_REG_DLL_CONFIG_3[1] to select RCLK as
3522 * DLL input clock
3523 */
3524 writel_relaxed(((readl_relaxed(host->ioaddr +
3525 msm_host_offset->CORE_DDR_CONFIG))
3526 & ~RCLK_TOGGLE), host->ioaddr +
3527 msm_host_offset->CORE_DDR_CONFIG);
3528 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003529 if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
3530 /*
3531 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
3532 * CORE_DLL_STATUS to be set. This should get set
3533 * within 15 us at 200 MHz.
3534 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303535 rc = readl_poll_timeout(host->ioaddr +
3536 msm_host_offset->CORE_DLL_STATUS,
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003537 dll_lock, (dll_lock & (CORE_DLL_LOCK |
3538 CORE_DDR_DLL_LOCK)), 10, 1000);
3539 if (rc == -ETIMEDOUT)
3540 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
3541 mmc_hostname(host->mmc),
3542 dll_lock);
3543 }
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003544 } else {
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003545 if (!msm_host->use_cdclp533)
3546 /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
3547 writel_relaxed((readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303548 msm_host_offset->CORE_VENDOR_SPEC3)
3549 & ~CORE_PWRSAVE_DLL), host->ioaddr +
3550 msm_host_offset->CORE_VENDOR_SPEC3);
Krishna Konda2faa7bb2014-06-04 01:25:16 -07003551
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003552 /* Select the default clock (free running MCLK) */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303553 writel_relaxed(((readl_relaxed(host->ioaddr +
3554 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003555 & ~CORE_HC_MCLK_SEL_MASK)
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303556 | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
3557 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003558
3559 /*
3560 * Disable HC_SELECT_IN to be able to use the UHS mode select
3561 * configuration from Host Control2 register for all other
3562 * modes.
3563 *
3564 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
3565 * in VENDOR_SPEC_FUNC
3566 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303567 writel_relaxed((readl_relaxed(host->ioaddr +
3568 msm_host_offset->CORE_VENDOR_SPEC)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003569 & ~CORE_HC_SELECT_IN_EN
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303570 & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
3571 msm_host_offset->CORE_VENDOR_SPEC);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003572 }
3573 mb();
3574
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303575 if (sup_clock != msm_host->clk_rate) {
3576 pr_debug("%s: %s: setting clk rate to %u\n",
3577 mmc_hostname(host->mmc), __func__, sup_clock);
3578 rc = clk_set_rate(msm_host->clk, sup_clock);
3579 if (rc) {
3580 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
3581 mmc_hostname(host->mmc), __func__,
3582 sup_clock, rc);
3583 goto out;
3584 }
3585 msm_host->clk_rate = sup_clock;
3586 host->clock = clock;
Can Guob903ad82017-10-17 13:22:53 +08003587
Vijay Viswanathc9e2c0f2017-11-09 15:43:25 +05303588 if (!IS_ERR(msm_host->bus_aggr_clk) &&
3589 msm_host->pdata->bus_clk_cnt) {
3590 bus_clk_rate = sdhci_msm_get_bus_aggr_clk_rate(host,
3591 sup_clock);
3592 if (bus_clk_rate >= 0) {
3593 rc = clk_set_rate(msm_host->bus_aggr_clk,
3594 bus_clk_rate);
3595 if (rc) {
3596 pr_err("%s: %s: Failed to set rate %ld for bus-aggr-clk : %d\n",
3597 mmc_hostname(host->mmc),
3598 __func__, bus_clk_rate, rc);
3599 goto out;
3600 }
3601 } else {
3602 pr_err("%s: %s: Unsupported apps clk rate %u for bus-aggr-clk, err: %ld\n",
3603 mmc_hostname(host->mmc), __func__,
3604 sup_clock, bus_clk_rate);
3605 }
3606 }
3607
Can Guob903ad82017-10-17 13:22:53 +08003608 /* Configure pinctrl drive type according to
3609 * current clock rate
3610 */
3611 rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
3612 if (rc)
3613 pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
3614 mmc_hostname(host->mmc), __func__,
3615 clock, rc);
3616
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303617 /*
3618 * Update the bus vote in case of frequency change due to
3619 * clock scaling.
3620 */
3621 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303622 }
3623out:
3624 sdhci_set_clock(host, clock);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303625}
3626
Sahitya Tummala14613432013-03-21 11:13:25 +05303627static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
3628 unsigned int uhs)
3629{
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003630 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3631 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303632 const struct sdhci_msm_offset *msm_host_offset =
3633 msm_host->offset;
Sahitya Tummala14613432013-03-21 11:13:25 +05303634 u16 ctrl_2;
3635
3636 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3637 /* Select Bus Speed Mode for host */
3638 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003639 if ((uhs == MMC_TIMING_MMC_HS400) ||
3640 (uhs == MMC_TIMING_MMC_HS200) ||
3641 (uhs == MMC_TIMING_UHS_SDR104))
Sahitya Tummala14613432013-03-21 11:13:25 +05303642 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
3643 else if (uhs == MMC_TIMING_UHS_SDR12)
3644 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
3645 else if (uhs == MMC_TIMING_UHS_SDR25)
3646 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
3647 else if (uhs == MMC_TIMING_UHS_SDR50)
3648 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
Venkat Gopalakrishnan0a29da92015-01-09 12:19:16 -08003649 else if ((uhs == MMC_TIMING_UHS_DDR50) ||
3650 (uhs == MMC_TIMING_MMC_DDR52))
Sahitya Tummala14613432013-03-21 11:13:25 +05303651 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303652 /*
3653 * When clock frequency is less than 100MHz, the feedback clock must be
3654 * provided and DLL must not be used so that tuning can be skipped. To
3655 * provide feedback clock, the mode selection can be any value less
3656 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
3657 */
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003658 if (host->clock <= CORE_FREQ_100MHZ) {
3659 if ((uhs == MMC_TIMING_MMC_HS400) ||
3660 (uhs == MMC_TIMING_MMC_HS200) ||
3661 (uhs == MMC_TIMING_UHS_SDR104))
3662 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303663
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003664 /*
3665 * Make sure DLL is disabled when not required
3666 *
3667 * Write 1 to DLL_RST bit of DLL_CONFIG register
3668 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303669 writel_relaxed((readl_relaxed(host->ioaddr +
3670 msm_host_offset->CORE_DLL_CONFIG)
3671 | CORE_DLL_RST), host->ioaddr +
3672 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003673
3674 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303675 writel_relaxed((readl_relaxed(host->ioaddr +
3676 msm_host_offset->CORE_DLL_CONFIG)
3677 | CORE_DLL_PDN), host->ioaddr +
3678 msm_host_offset->CORE_DLL_CONFIG);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003679 mb();
3680
3681 /*
3682 * The DLL needs to be restored and CDCLP533 recalibrated
3683 * when the clock frequency is set back to 400MHz.
3684 */
3685 msm_host->calibration_done = false;
3686 }
3687
3688 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
3689 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala14613432013-03-21 11:13:25 +05303690 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
3691
3692}
3693
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003694#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003695#define DRV_NAME "cmdq-host"
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303696static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003697{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303698 int i = 0;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303699 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3700 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303701 const struct sdhci_msm_offset *msm_host_offset =
3702 msm_host->offset;
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303703 struct cmdq_host *cq_host = host->cq_host;
3704
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303705 u32 version = sdhci_msm_readl_relaxed(host,
3706 msm_host_offset->CORE_MCI_VERSION);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003707 u16 minor = version & CORE_VERSION_TARGET_MASK;
3708 /* registers offset changed starting from 4.2.0 */
3709 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
3710
Sayali Lokhande6e7e6d52017-01-04 12:00:35 +05303711 if (cq_host->offset_changed)
3712 offset += CQ_V5_VENDOR_CFG;
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003713 pr_err("---- Debug RAM dump ----\n");
3714 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
3715 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
3716 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
3717
3718 while (i < 16) {
3719 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
3720 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
3721 i++;
3722 }
3723 pr_err("-------------------------\n");
3724}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303725
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303726static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
3727{
3728 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3729 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3730 struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
3731
3732 memcpy(&cached_data->copy_mmc, msm_host->mmc,
3733 sizeof(struct mmc_host));
3734 if (msm_host->mmc->card)
3735 memcpy(&cached_data->copy_card, msm_host->mmc->card,
3736 sizeof(struct mmc_card));
3737 memcpy(&cached_data->copy_host, host,
3738 sizeof(struct sdhci_host));
3739}
3740
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303741void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
3742{
3743 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3744 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303745 const struct sdhci_msm_offset *msm_host_offset =
3746 msm_host->offset;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303747 int tbsel, tbsel2;
3748 int i, index = 0;
3749 u32 test_bus_val = 0;
3750 u32 debug_reg[MAX_TEST_BUS] = {0};
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303751 u32 sts = 0;
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303752
Sayali Lokhandec8ad70f2016-12-14 11:10:55 +05303753 sdhci_msm_cache_debug_data(host);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303754 pr_info("----------- VENDOR REGISTER DUMP -----------\n");
Subhash Jadavania7a36b82015-10-16 18:33:25 -07003755 if (host->cq_host)
Ritesh Harjani17f5d812015-12-23 13:21:02 +05303756 sdhci_msm_cmdq_dump_debug_ram(host);
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03003757
Sayali Lokhandebff771e2016-11-30 11:35:22 +05303758 MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
3759 sdhci_msm_readl_relaxed(host,
3760 msm_host_offset->CORE_MCI_DATA_CNT),
3761 sdhci_msm_readl_relaxed(host,
3762 msm_host_offset->CORE_MCI_FIFO_CNT));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303763 pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303764 sdhci_msm_readl_relaxed(host,
3765 msm_host_offset->CORE_MCI_DATA_CNT),
3766 sdhci_msm_readl_relaxed(host,
3767 msm_host_offset->CORE_MCI_FIFO_CNT),
3768 sdhci_msm_readl_relaxed(host,
3769 msm_host_offset->CORE_MCI_STATUS));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303770 pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303771 readl_relaxed(host->ioaddr +
3772 msm_host_offset->CORE_DLL_CONFIG),
3773 readl_relaxed(host->ioaddr +
3774 msm_host_offset->CORE_DLL_STATUS),
3775 sdhci_msm_readl_relaxed(host,
3776 msm_host_offset->CORE_MCI_VERSION));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303777 pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303778 readl_relaxed(host->ioaddr +
3779 msm_host_offset->CORE_VENDOR_SPEC),
3780 readl_relaxed(host->ioaddr +
3781 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
3782 readl_relaxed(host->ioaddr +
3783 msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05303784 pr_info("Vndr func2: 0x%08x\n",
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303785 readl_relaxed(host->ioaddr +
3786 msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303787
3788 /*
3789 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
3790 * of CORE_TESTBUS_CONFIG register.
3791 *
3792 * To select test bus 0 to 7 use tbsel and to select any test bus
3793 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
3794 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
3795 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
3796 */
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08003797 for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303798 for (tbsel = 0; tbsel < 8; tbsel++) {
3799 if (index >= MAX_TEST_BUS)
3800 break;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303801 test_bus_val =
3802 (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
3803 tbsel | msm_host_offset->CORE_TESTBUS_ENA;
3804 sdhci_msm_writel_relaxed(test_bus_val, host,
3805 msm_host_offset->CORE_TESTBUS_CONFIG);
3806 debug_reg[index++] = sdhci_msm_readl_relaxed(host,
3807 msm_host_offset->CORE_SDCC_DEBUG_REG);
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303808 }
3809 }
3810 for (i = 0; i < MAX_TEST_BUS; i = i + 4)
3811 pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
3812 i, i + 3, debug_reg[i], debug_reg[i+1],
3813 debug_reg[i+2], debug_reg[i+3]);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303814 if (host->is_crypto_en) {
3815 sdhci_msm_ice_get_status(host, &sts);
3816 pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
Venkat Gopalakrishnan6324ee62015-10-22 17:53:30 -07003817 sdhci_msm_ice_print_regs(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303818 }
3819}
3820
3821static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
3822{
3823 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3824 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3825
3826 /* Set ICE core to be reset in sync with SDHC core */
Veerabhadrarao Badiganti4e40ad62017-01-31 17:09:16 +05303827 if (msm_host->ice.pdev) {
3828 if (msm_host->ice_hci_support)
3829 writel_relaxed(1, host->ioaddr +
3830 HC_VENDOR_SPECIFIC_ICE_CTRL);
3831 else
3832 writel_relaxed(1,
3833 host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
3834 }
Sahitya Tummala9325fb02015-05-08 11:53:29 +05303835
3836 sdhci_reset(host, mask);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003837}
3838
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303839/*
3840 * sdhci_msm_enhanced_strobe_mask :-
3841 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3842 * SW should write 3 to
3843 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3844 * The default reset value of this register is 2.
3845 */
3846static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3847{
3848 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3849 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303850 const struct sdhci_msm_offset *msm_host_offset =
3851 msm_host->offset;
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303852
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303853 if (!msm_host->enhanced_strobe ||
3854 !mmc_card_strobe(msm_host->mmc->card)) {
3855 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303856 mmc_hostname(host->mmc));
3857 return;
3858 }
3859
3860 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303861 writel_relaxed((readl_relaxed(host->ioaddr +
3862 msm_host_offset->CORE_VENDOR_SPEC3)
3863 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3864 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303865 } else {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303866 writel_relaxed((readl_relaxed(host->ioaddr +
3867 msm_host_offset->CORE_VENDOR_SPEC3)
3868 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3869 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303870 }
3871}
3872
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003873static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3874{
3875 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3876 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303877 const struct sdhci_msm_offset *msm_host_offset =
3878 msm_host->offset;
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003879
3880 if (set) {
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303881 sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
3882 host, msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003883 } else {
3884 u32 value;
3885
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303886 value = sdhci_msm_readl_relaxed(host,
3887 msm_host_offset->CORE_TESTBUS_CONFIG);
3888 value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
3889 sdhci_msm_writel_relaxed(value, host,
3890 msm_host_offset->CORE_TESTBUS_CONFIG);
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003891 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303892}
3893
Pavan Anamula691dd592015-08-25 16:11:20 +05303894void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
3895{
3896 u32 vendor_func2;
3897 unsigned long timeout;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303898 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3899 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3900 const struct sdhci_msm_offset *msm_host_offset =
3901 msm_host->offset;
Pavan Anamula691dd592015-08-25 16:11:20 +05303902
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303903 vendor_func2 = readl_relaxed(host->ioaddr +
3904 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303905
3906 if (enable) {
3907 writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303908 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303909 timeout = 10000;
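/*
 * Poll for up to ~100 ms (10000 iterations x 10 us) for HC_SW_RST_REQ
 * to clear; if it does not, apply the wait-idle-disable workaround below.
 */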
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303910 while (readl_relaxed(host->ioaddr +
3911 msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
Pavan Anamula691dd592015-08-25 16:11:20 +05303912 if (timeout == 0) {
3913 pr_info("%s: Applying wait idle disable workaround\n",
3914 mmc_hostname(host->mmc));
3915 /*
3916 * Apply the reset workaround to not wait for
3917 * pending data transfers on AXI before
3918 * resetting the controller. This could be
3919 * risky if the transfers were stuck on the
3920 * AXI bus.
3921 */
3922 vendor_func2 = readl_relaxed(host->ioaddr +
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303923 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303924 writel_relaxed(vendor_func2 |
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303925 HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
3926 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303927 host->reset_wa_t = ktime_get();
3928 return;
3929 }
3930 timeout--;
3931 udelay(10);
3932 }
3933 pr_info("%s: waiting for SW_RST_REQ is successful\n",
3934 mmc_hostname(host->mmc));
3935 } else {
3936 writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05303937 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Pavan Anamula691dd592015-08-25 16:11:20 +05303938 }
3939}
3940
Gilad Broner44445992015-09-29 16:05:39 +03003941static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3942{
3943 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
Asutosh Das36c2e922015-12-01 12:19:58 +05303944 container_of(work, struct sdhci_msm_pm_qos_irq,
3945 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03003946
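/* Bail out if new votes arrived while the delayed unvote work was pending. */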
3947 if (atomic_read(&pm_qos_irq->counter))
3948 return;
3949
3950 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3951 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3952}
3953
3954void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
3955{
3956 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3957 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3958 struct sdhci_msm_pm_qos_latency *latency =
3959 &msm_host->pdata->pm_qos_data.irq_latency;
3960 int counter;
3961
3962 if (!msm_host->pm_qos_irq.enabled)
3963 return;
3964
3965 counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
3966 /* Make sure to update the voting in case power policy has changed */
3967 if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
3968 && counter > 1)
3969 return;
3970
Asutosh Das36c2e922015-12-01 12:19:58 +05303971 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03003972 msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
3973 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3974 msm_host->pm_qos_irq.latency);
3975}
3976
3977void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
3978{
3979 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3980 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3981 int counter;
3982
3983 if (!msm_host->pm_qos_irq.enabled)
3984 return;
3985
Subhash Jadavani4d813902015-10-15 12:16:43 -07003986 if (atomic_read(&msm_host->pm_qos_irq.counter)) {
3987 counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
3988 } else {
3989 WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
3990 return;
Gilad Broner44445992015-09-29 16:05:39 +03003991 }
Subhash Jadavani4d813902015-10-15 12:16:43 -07003992
Gilad Broner44445992015-09-29 16:05:39 +03003993 if (counter)
3994 return;
3995
3996 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05303997 queue_delayed_work(msm_host->pm_qos_wq,
3998 &msm_host->pm_qos_irq.unvote_work,
3999 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner44445992015-09-29 16:05:39 +03004000 return;
4001 }
4002
4003 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
4004 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4005 msm_host->pm_qos_irq.latency);
4006}
4007
Gilad Broner68c54562015-09-20 11:59:46 +03004008static ssize_t
4009sdhci_msm_pm_qos_irq_show(struct device *dev,
4010 struct device_attribute *attr, char *buf)
4011{
4012 struct sdhci_host *host = dev_get_drvdata(dev);
4013 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4014 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4015 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
4016
4017 return snprintf(buf, PAGE_SIZE,
4018 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
4019 irq->enabled, atomic_read(&irq->counter), irq->latency);
4020}
4021
4022static ssize_t
4023sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
4024 struct device_attribute *attr, char *buf)
4025{
4026 struct sdhci_host *host = dev_get_drvdata(dev);
4027 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4028 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4029
4030 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
4031}
4032
4033static ssize_t
4034sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
4035 struct device_attribute *attr, const char *buf, size_t count)
4036{
4037 struct sdhci_host *host = dev_get_drvdata(dev);
4038 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4039 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4040 uint32_t value;
4041 bool enable;
4042 int ret;
4043
4044 ret = kstrtou32(buf, 0, &value);
4045 if (ret)
4046 goto out;
4047 enable = !!value;
4048
4049 if (enable == msm_host->pm_qos_irq.enabled)
4050 goto out;
4051
4052 msm_host->pm_qos_irq.enabled = enable;
4053 if (!enable) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304054 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004055 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4056 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
4057 pm_qos_update_request(&msm_host->pm_qos_irq.req,
4058 msm_host->pm_qos_irq.latency);
4059 }
4060
4061out:
4062 return count;
4063}
4064
Krishna Kondaf85e31a2015-10-23 11:43:02 -07004065#ifdef CONFIG_SMP
4066static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
4067 struct sdhci_host *host)
4068{
4069 msm_host->pm_qos_irq.req.irq = host->irq;
4070}
4071#else
4072static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
4073 struct sdhci_host *host) { }
4074#endif
4075
Vijay Viswanath1971d222018-03-01 12:01:47 +05304076static bool sdhci_msm_pm_qos_wq_init(struct sdhci_msm_host *msm_host)
4077{
4078 char *wq = NULL;
4079 bool ret = true;
4080
4081 wq = kasprintf(GFP_KERNEL, "sdhci_msm_pm_qos/%s",
4082 dev_name(&msm_host->pdev->dev));
4083 if (!wq)
4084 return false;
4085 /*
4086 * Create a work queue with flag WQ_MEM_RECLAIM set for
4087 * pm_qos_unvote work. Because the mmc thread is created with
4088 * flag PF_MEMALLOC set, the kernel checks for the work queue
4089 * flag WQ_MEM_RECLAIM when flushing the work queue. If the
4090 * WQ_MEM_RECLAIM flag is not set, a kernel warning
4091 * will be triggered.
4092 */
4093 msm_host->pm_qos_wq = create_workqueue(wq);
4094 if (!msm_host->pm_qos_wq) {
4095 ret = false;
4096 dev_err(&msm_host->pdev->dev,
4097 "failed to create pm qos unvote work queue\n");
4098 }
4099 kfree(wq);
4100 return ret;
4101}
4102
Gilad Broner44445992015-09-29 16:05:39 +03004103void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
4104{
4105 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4106 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4107 struct sdhci_msm_pm_qos_latency *irq_latency;
Gilad Broner68c54562015-09-20 11:59:46 +03004108 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004109
4110 if (!msm_host->pdata->pm_qos_data.irq_valid)
4111 return;
4112
4113 /* Initialize only once as this gets called per partition */
4114 if (msm_host->pm_qos_irq.enabled)
4115 return;
4116
4117 atomic_set(&msm_host->pm_qos_irq.counter, 0);
4118 msm_host->pm_qos_irq.req.type =
4119 msm_host->pdata->pm_qos_data.irq_req_type;
Krishna Kondaf85e31a2015-10-23 11:43:02 -07004120 if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
4121 (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
4122 set_affine_irq(msm_host, host);
Gilad Broner44445992015-09-29 16:05:39 +03004123 else
4124 cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
4125 cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
4126
Vijay Viswanath1971d222018-03-01 12:01:47 +05304127 sdhci_msm_pm_qos_wq_init(msm_host);
4128
Asutosh Das36c2e922015-12-01 12:19:58 +05304129 INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004130 sdhci_msm_pm_qos_irq_unvote_work);
4131 /* For initialization phase, set the performance latency */
4132 irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
4133 msm_host->pm_qos_irq.latency =
4134 irq_latency->latency[SDHCI_PERFORMANCE_MODE];
4135 pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
4136 msm_host->pm_qos_irq.latency);
4137 msm_host->pm_qos_irq.enabled = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004138
4139 /* sysfs */
4140 msm_host->pm_qos_irq.enable_attr.show =
4141 sdhci_msm_pm_qos_irq_enable_show;
4142 msm_host->pm_qos_irq.enable_attr.store =
4143 sdhci_msm_pm_qos_irq_enable_store;
4144 sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
4145 msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
4146 msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
4147 ret = device_create_file(&msm_host->pdev->dev,
4148 &msm_host->pm_qos_irq.enable_attr);
4149 if (ret)
4150 pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
4151 __func__, ret);
4152
4153 msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
4154 msm_host->pm_qos_irq.status_attr.store = NULL;
4155 sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
4156 msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
4157 msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
4158 ret = device_create_file(&msm_host->pdev->dev,
4159 &msm_host->pm_qos_irq.status_attr);
4160 if (ret)
4161 pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
4162 __func__, ret);
4163}
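
/*
 * The attributes registered above are created on the platform device, so
 * they appear under its sysfs directory. A rough usage sketch (the exact
 * path depends on the device name on a given board and is shown here
 * purely for illustration):
 *
 *   cat /sys/devices/platform/<sdhc-device>/pm_qos_irq_status
 *   echo 0 > /sys/devices/platform/<sdhc-device>/pm_qos_irq_enable
 */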
4164
4165static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
4166 struct device_attribute *attr, char *buf)
4167{
4168 struct sdhci_host *host = dev_get_drvdata(dev);
4169 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4170 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4171 struct sdhci_msm_pm_qos_group *group;
4172 int i;
4173 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4174 int offset = 0;
4175
4176 for (i = 0; i < nr_groups; i++) {
4177 group = &msm_host->pm_qos[i];
 4178		offset += snprintf(&buf[offset], PAGE_SIZE - offset,
4179 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
4180 i, group->req.cpus_affine.bits[0],
4181 msm_host->pm_qos_group_enable,
4182 atomic_read(&group->counter),
4183 group->latency);
4184 }
4185
4186 return offset;
4187}
4188
4189static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
4190 struct device_attribute *attr, char *buf)
4191{
4192 struct sdhci_host *host = dev_get_drvdata(dev);
4193 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4194 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4195
4196 return snprintf(buf, PAGE_SIZE, "%s\n",
4197 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
4198}
4199
4200static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
4201 struct device_attribute *attr, const char *buf, size_t count)
4202{
4203 struct sdhci_host *host = dev_get_drvdata(dev);
4204 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4205 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4206 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4207 uint32_t value;
4208 bool enable;
4209 int ret;
4210 int i;
4211
4212 ret = kstrtou32(buf, 0, &value);
4213 if (ret)
4214 goto out;
4215 enable = !!value;
4216
4217 if (enable == msm_host->pm_qos_group_enable)
4218 goto out;
4219
4220 msm_host->pm_qos_group_enable = enable;
4221 if (!enable) {
4222 for (i = 0; i < nr_groups; i++) {
Asutosh Das36c2e922015-12-01 12:19:58 +05304223 cancel_delayed_work_sync(
4224 &msm_host->pm_qos[i].unvote_work);
Gilad Broner68c54562015-09-20 11:59:46 +03004225 atomic_set(&msm_host->pm_qos[i].counter, 0);
4226 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
4227 pm_qos_update_request(&msm_host->pm_qos[i].req,
4228 msm_host->pm_qos[i].latency);
4229 }
4230 }
4231
4232out:
4233 return count;
Gilad Broner44445992015-09-29 16:05:39 +03004234}
4235
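/*
 * Map a CPU number to the index of the PM QoS CPU group whose mask
 * contains it, as defined in the platform data group map. Returns
 * -EINVAL for a negative CPU or a CPU not covered by any group.
 */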
4236static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
4237{
4238 int i;
4239 struct sdhci_msm_cpu_group_map *map =
4240 &msm_host->pdata->pm_qos_data.cpu_group_map;
4241
4242 if (cpu < 0)
4243 goto not_found;
4244
4245 for (i = 0; i < map->nr_groups; i++)
4246 if (cpumask_test_cpu(cpu, &map->mask[i]))
4247 return i;
4248
4249not_found:
4250 return -EINVAL;
4251}
4252
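/*
 * Take a PM QoS vote on behalf of @cpu's group: bump the group's counter
 * and, for the first voter or when the current power policy's latency
 * differs from the one already requested, cancel any pending unvote work
 * and update the group's pm_qos request to the new latency.
 */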
4253void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
4254 struct sdhci_msm_pm_qos_latency *latency, int cpu)
4255{
4256 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4257 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4258 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4259 struct sdhci_msm_pm_qos_group *pm_qos_group;
4260 int counter;
4261
4262 if (!msm_host->pm_qos_group_enable || group < 0)
4263 return;
4264
4265 pm_qos_group = &msm_host->pm_qos[group];
4266 counter = atomic_inc_return(&pm_qos_group->counter);
4267
4268 /* Make sure to update the voting in case power policy has changed */
4269 if (pm_qos_group->latency == latency->latency[host->power_policy]
4270 && counter > 1)
4271 return;
4272
Asutosh Das36c2e922015-12-01 12:19:58 +05304273 cancel_delayed_work_sync(&pm_qos_group->unvote_work);
Gilad Broner44445992015-09-29 16:05:39 +03004274
4275 pm_qos_group->latency = latency->latency[host->power_policy];
4276 pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
4277}
4278
4279static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
4280{
4281 struct sdhci_msm_pm_qos_group *group =
Asutosh Das36c2e922015-12-01 12:19:58 +05304282 container_of(work, struct sdhci_msm_pm_qos_group,
4283 unvote_work.work);
Gilad Broner44445992015-09-29 16:05:39 +03004284
4285 if (atomic_read(&group->counter))
4286 return;
4287
4288 group->latency = PM_QOS_DEFAULT_VALUE;
4289 pm_qos_update_request(&group->req, group->latency);
4290}
4291
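/*
 * Drop a PM QoS vote for @cpu's group. Returns false if group voting is
 * disabled, the CPU has no group, or other votes are still outstanding.
 * When the last vote is dropped, the unvote is either deferred to the
 * delayed work (async) or the request is restored to PM_QOS_DEFAULT_VALUE
 * immediately, and true is returned.
 */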
Gilad Broner07d92eb2015-09-29 16:57:21 +03004292bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03004293{
4294 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4295 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4296 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
4297
4298 if (!msm_host->pm_qos_group_enable || group < 0 ||
4299 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03004300 return false;
Gilad Broner44445992015-09-29 16:05:39 +03004301
4302 if (async) {
Vijay Viswanath1971d222018-03-01 12:01:47 +05304303 queue_delayed_work(msm_host->pm_qos_wq,
4304 &msm_host->pm_qos[group].unvote_work,
4305 msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
Gilad Broner07d92eb2015-09-29 16:57:21 +03004306 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004307 }
4308
4309 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
4310 pm_qos_update_request(&msm_host->pm_qos[group].req,
4311 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03004312 return true;
Gilad Broner44445992015-09-29 16:05:39 +03004313}
4314
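/*
 * Allocate one pm_qos request per CPU group described in the platform
 * data, add each request with the default latency, and register the
 * "pm_qos_cpu_groups_status" and "pm_qos_cpu_groups_enable" sysfs
 * attributes. Subsequent calls return early once group voting has been
 * enabled.
 */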
4315void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
4316 struct sdhci_msm_pm_qos_latency *latency)
4317{
4318 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4319 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4320 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
4321 struct sdhci_msm_pm_qos_group *group;
4322 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03004323 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03004324
4325 if (msm_host->pm_qos_group_enable)
4326 return;
4327
4328 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
4329 GFP_KERNEL);
4330 if (!msm_host->pm_qos)
4331 return;
4332
4333 for (i = 0; i < nr_groups; i++) {
4334 group = &msm_host->pm_qos[i];
Asutosh Das36c2e922015-12-01 12:19:58 +05304335 INIT_DELAYED_WORK(&group->unvote_work,
Gilad Broner44445992015-09-29 16:05:39 +03004336 sdhci_msm_pm_qos_cpu_unvote_work);
4337 atomic_set(&group->counter, 0);
4338 group->req.type = PM_QOS_REQ_AFFINE_CORES;
4339 cpumask_copy(&group->req.cpus_affine,
4340 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
Ritesh Harjanib41e0572017-03-28 13:19:26 +05304341 /* We set default latency here for all pm_qos cpu groups. */
4342 group->latency = PM_QOS_DEFAULT_VALUE;
Gilad Broner44445992015-09-29 16:05:39 +03004343 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
4344 group->latency);
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304345 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
Gilad Broner44445992015-09-29 16:05:39 +03004346 __func__, i,
4347 group->req.cpus_affine.bits[0],
Vijay Viswanathd9311f92017-12-11 10:52:49 +05304348 group->latency);
Gilad Broner44445992015-09-29 16:05:39 +03004349 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03004350 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03004351 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03004352
4353 /* sysfs */
4354 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
4355 msm_host->pm_qos_group_status_attr.store = NULL;
4356 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
4357 msm_host->pm_qos_group_status_attr.attr.name =
4358 "pm_qos_cpu_groups_status";
4359 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
4360 ret = device_create_file(&msm_host->pdev->dev,
4361 &msm_host->pm_qos_group_status_attr);
4362 if (ret)
4363 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
4364 __func__, ret);
4365 msm_host->pm_qos_group_enable_attr.show =
4366 sdhci_msm_pm_qos_group_enable_show;
4367 msm_host->pm_qos_group_enable_attr.store =
4368 sdhci_msm_pm_qos_group_enable_store;
4369 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
4370 msm_host->pm_qos_group_enable_attr.attr.name =
4371 "pm_qos_cpu_groups_enable";
4372 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
4373 ret = device_create_file(&msm_host->pdev->dev,
4374 &msm_host->pm_qos_group_enable_attr);
4375 if (ret)
4376 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
4377 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03004378}
4379
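/*
 * Per-request hooks: sdhci_msm_pre_req() votes for the IRQ pm_qos and for
 * the CPU group of the CPU issuing the request, first dropping the vote
 * held for the previous group if the submitter moved to a different one.
 * sdhci_msm_post_req() releases both votes once the request is done.
 */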
Gilad Broner07d92eb2015-09-29 16:57:21 +03004380static void sdhci_msm_pre_req(struct sdhci_host *host,
4381 struct mmc_request *mmc_req)
4382{
4383 int cpu;
4384 int group;
4385 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4386 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4387 int prev_group = sdhci_msm_get_cpu_group(msm_host,
4388 msm_host->pm_qos_prev_cpu);
4389
4390 sdhci_msm_pm_qos_irq_vote(host);
4391
4392 cpu = get_cpu();
4393 put_cpu();
4394 group = sdhci_msm_get_cpu_group(msm_host, cpu);
4395 if (group < 0)
4396 return;
4397
4398 if (group != prev_group && prev_group >= 0) {
4399 sdhci_msm_pm_qos_cpu_unvote(host,
4400 msm_host->pm_qos_prev_cpu, false);
4401 prev_group = -1; /* make sure to vote for new group */
4402 }
4403
4404 if (prev_group < 0) {
4405 sdhci_msm_pm_qos_cpu_vote(host,
4406 msm_host->pdata->pm_qos_data.latency, cpu);
4407 msm_host->pm_qos_prev_cpu = cpu;
4408 }
4409}
4410
4411static void sdhci_msm_post_req(struct sdhci_host *host,
4412 struct mmc_request *mmc_req)
4413{
4414 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4415 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4416
4417 sdhci_msm_pm_qos_irq_unvote(host, false);
4418
4419 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
4420 msm_host->pm_qos_prev_cpu = -1;
4421}
4422
4423static void sdhci_msm_init(struct sdhci_host *host)
4424{
4425 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4426 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4427
4428 sdhci_msm_pm_qos_irq_init(host);
4429
4430 if (msm_host->pdata->pm_qos_data.legacy_valid)
4431 sdhci_msm_pm_qos_cpu_init(host,
4432 msm_host->pdata->pm_qos_data.latency);
4433}
4434
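/*
 * Report the maximum current (in uA) the vdd supply can deliver in high
 * power mode, taken from the regulator platform data; 0 when unknown.
 */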
Sahitya Tummala9150a942014-10-31 15:33:04 +05304435static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
4436{
4437 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4438 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4439 struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
4440 u32 max_curr = 0;
4441
4442 if (curr_slot && curr_slot->vdd_data)
4443 max_curr = curr_slot->vdd_data->hpm_uA;
4444
4445 return max_curr;
4446}
4447
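/*
 * Scale the ICE core clock with the MMC load: run it at ice_clk_min for
 * MMC_LOAD_LOW and at ice_clk_max otherwise, skipping the clk_set_rate()
 * call when the requested rate is already programmed or when no ICE
 * clock is present.
 */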
Sahitya Tummala073ca552015-08-06 13:59:37 +05304448static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
4449{
4450 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4451 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4452 int ret = 0;
4453 u32 clk_rate = 0;
4454
4455 if (!IS_ERR(msm_host->ice_clk)) {
4456 clk_rate = (state == MMC_LOAD_LOW) ?
4457 msm_host->pdata->ice_clk_min :
4458 msm_host->pdata->ice_clk_max;
4459 if (msm_host->ice_clk_rate == clk_rate)
4460 return 0;
4461 pr_debug("%s: changing ICE clk rate to %u\n",
4462 mmc_hostname(host->mmc), clk_rate);
4463 ret = clk_set_rate(msm_host->ice_clk, clk_rate);
4464 if (ret) {
4465 pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
4466 mmc_hostname(host->mmc), ret, clk_rate);
4467 return ret;
4468 }
4469 msm_host->ice_clk_rate = clk_rate;
4470 }
4471 return 0;
4472}
4473
Asutosh Das0ef24812012-12-18 16:14:02 +05304474static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304475 .crypto_engine_cfg = sdhci_msm_ice_cfg,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304476 .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
Veerabhadrarao Badiganti6c6b97a2017-03-08 06:51:49 +05304477 .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
Veerabhadrarao Badigantidec58802017-01-31 11:21:37 +05304478 .crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304479 .crypto_engine_reset = sdhci_msm_ice_reset,
Sahitya Tummala14613432013-03-21 11:13:25 +05304480 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das0ef24812012-12-18 16:14:02 +05304481 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004482 .platform_execute_tuning = sdhci_msm_execute_tuning,
Ritesh Harjaniea709662015-05-27 15:40:24 +05304483 .enhanced_strobe = sdhci_msm_enhanced_strobe,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004484 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das648f9d12013-01-10 21:11:04 +05304485 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304486 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304487 .get_min_clock = sdhci_msm_get_min_clock,
4488 .get_max_clock = sdhci_msm_get_max_clock,
Sahitya Tummala67717bc2013-08-02 09:21:37 +05304489 .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304490 .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304491 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Venkat Gopalakrishnanb8cb7072015-01-09 11:04:34 -08004492 .set_bus_width = sdhci_set_bus_width,
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304493 .reset = sdhci_msm_reset,
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07004494 .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05304495 .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
Pavan Anamula691dd592015-08-25 16:11:20 +05304496 .reset_workaround = sdhci_msm_reset_workaround,
Gilad Broner07d92eb2015-09-29 16:57:21 +03004497 .init = sdhci_msm_init,
4498 .pre_req = sdhci_msm_pre_req,
4499 .post_req = sdhci_msm_post_req,
Sahitya Tummala9150a942014-10-31 15:33:04 +05304500 .get_current_limit = sdhci_msm_get_current_limit,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304501 .notify_load = sdhci_msm_notify_load,
Asutosh Das0ef24812012-12-18 16:14:02 +05304502};
4503
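/*
 * Derive host capabilities and quirks from the core version register:
 * advertise 3.0V/1.8V/8-bit support where the regulator and bus-width
 * configuration allow it, pick the DLL calibration variant, apply the
 * reset and MCLK-gating workarounds for the affected minor versions, and
 * write the resulting capabilities into the vendor-specific capabilities
 * register.
 */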
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304504static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
4505 struct sdhci_host *host)
4506{
Krishna Konda46fd1432014-10-30 21:13:27 -07004507 u32 version, caps = 0;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304508 u16 minor;
4509 u8 major;
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304510 u32 val;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304511 const struct sdhci_msm_offset *msm_host_offset =
4512 msm_host->offset;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304513
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304514 version = sdhci_msm_readl_relaxed(host,
4515 msm_host_offset->CORE_MCI_VERSION);
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304516 major = (version & CORE_VERSION_MAJOR_MASK) >>
4517 CORE_VERSION_MAJOR_SHIFT;
4518 minor = version & CORE_VERSION_TARGET_MASK;
4519
Krishna Konda46fd1432014-10-30 21:13:27 -07004520 caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
4521
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304522 /*
 4523	 * Starting with the SDCC 5 controller (core major version = 1),
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004524	 * the controller won't advertise 3.0V, 1.8V and 8-bit features
 4525	 * except on some targets.
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304526 */
4527 if (major >= 1 && minor != 0x11 && minor != 0x12) {
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004528 struct sdhci_msm_reg_data *vdd_io_reg;
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004529 /*
4530 * Enable 1.8V support capability on controllers that
4531 * support dual voltage
4532 */
4533 vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
Krishna Konda46fd1432014-10-30 21:13:27 -07004534 if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
4535 caps |= CORE_3_0V_SUPPORT;
4536 if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
Venkat Gopalakrishnan3ac8fd32014-08-28 18:15:45 -07004537 caps |= CORE_1_8V_SUPPORT;
Pratibhasagar Vada47992013-12-09 20:42:32 +05304538 if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
4539 caps |= CORE_8_BIT_SUPPORT;
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304540 }
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004541
4542 /*
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304543 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
 4544	 * on 8992 (minor 0x3e) as a workaround to allow reset to recover from the data-stuck issue.
4545 */
4546 if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
Pavan Anamula691dd592015-08-25 16:11:20 +05304547 host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304548 val = readl_relaxed(host->ioaddr +
4549 msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304550 writel_relaxed((val | CORE_ONE_MID_EN),
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304551 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +05304552 }
4553 /*
Krishna Konda2faa7bb2014-06-04 01:25:16 -07004554 * SDCC 5 controller with major version 1, minor version 0x34 and later
 4555	 * with HS400 mode support will use CM DLL instead of CDC LP 533 DLL.
4556 */
4557 if ((major == 1) && (minor < 0x34))
4558 msm_host->use_cdclp533 = true;
Gilad Broner2a10ca02014-10-02 17:20:35 +03004559
4560 /*
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004561 * SDCC 5 controller with major version 1, minor version 0x42 and later
4562 * will require additional steps when resetting DLL.
Ritesh Harjaniea709662015-05-27 15:40:24 +05304563 * It also supports HS400 enhanced strobe mode.
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004564 */
Ritesh Harjaniea709662015-05-27 15:40:24 +05304565 if ((major == 1) && (minor >= 0x42)) {
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004566 msm_host->use_updated_dll_reset = true;
Ritesh Harjaniea709662015-05-27 15:40:24 +05304567 msm_host->enhanced_strobe = true;
4568 }
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -08004569
4570 /*
Talel Shenhar9a25b882015-06-02 13:36:35 +03004571 * SDCC 5 controller with major version 1 and minor version 0x42,
4572 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
 4573	 * gating cannot guarantee the MCLK timing requirement, i.e.
Ritesh Harjani764065e2015-05-13 14:14:45 +05304574	 * when MCLK is gated OFF, it is not gated for less than 0.5us
 4575	 * and MCLK must be switched on for at least 1us before DATA
4576 * starts coming.
4577 */
Talel Shenhar9a25b882015-06-02 13:36:35 +03004578 if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
Veerabhadrarao Badiganti06d2c8c2017-09-12 17:24:09 +05304579 (minor == 0x49) || (minor >= 0x6b)))
Ritesh Harjani764065e2015-05-13 14:14:45 +05304580 msm_host->use_14lpp_dll = true;
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004581
Pavan Anamula5a256df2015-10-16 14:38:28 +05304582	/* Fake 3.0V support for SDIO devices which require such voltage */
Veerabhadrarao Badigantiac24b402017-03-07 06:30:13 +05304583 if (msm_host->core_3_0v_support) {
Pavan Anamula5a256df2015-10-16 14:38:28 +05304584 caps |= CORE_3_0V_SUPPORT;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304585 writel_relaxed((readl_relaxed(host->ioaddr +
4586 SDHCI_CAPABILITIES) | caps), host->ioaddr +
4587 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Pavan Anamula5a256df2015-10-16 14:38:28 +05304588 }
4589
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -07004590 if ((major == 1) && (minor >= 0x49))
4591 msm_host->rclk_delay_fix = true;
Ritesh Harjani764065e2015-05-13 14:14:45 +05304592 /*
Gilad Broner2a10ca02014-10-02 17:20:35 +03004593	 * Mask 64-bit support for controllers with a 32-bit address bus so that
 4594	 * a smaller descriptor size is used, which reduces memory consumption.
Gilad Broner2a10ca02014-10-02 17:20:35 +03004595 */
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08004596 if (!msm_host->pdata->largeaddressbus)
4597 caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
4598
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304599 writel_relaxed(caps, host->ioaddr +
4600 msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
Krishna Konda46fd1432014-10-30 21:13:27 -07004601 /* keep track of the value in SDHCI_CAPABILITIES */
4602 msm_host->caps_0 = caps;
Ritesh Harjani82124772014-11-04 15:34:00 +05304603
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304604 if ((major == 1) && (minor >= 0x6b)) {
Ritesh Harjani82124772014-11-04 15:34:00 +05304605 msm_host->ice_hci_support = true;
Sayali Lokhande9efe6572017-07-12 09:22:38 +05304606 host->cdr_support = true;
4607 }
Veerabhadrarao Badigantib8f2b0c2018-03-14 15:21:05 +05304608
4609 if ((major == 1) && (minor >= 0x71))
4610 msm_host->need_dll_user_ctl = true;
4611
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304612}
4613
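/*
 * When CMDQ support is built in, attach the command-queue host unless it
 * was disabled on the kernel command line (nocmdq); on success the
 * MMC_CAP2_CMD_QUEUE capability is advertised. Without CONFIG_MMC_CQ_HCI
 * this is a no-op stub.
 */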
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004614#ifdef CONFIG_MMC_CQ_HCI
4615static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4616 struct platform_device *pdev)
4617{
4618 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4619 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4620
Ritesh Harjani7270ca22017-01-03 15:46:06 +05304621 if (nocmdq) {
4622 dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
4623 return;
4624 }
4625
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004626 host->cq_host = cmdq_pltfm_init(pdev);
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004627 if (IS_ERR(host->cq_host)) {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004628 dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
4629 PTR_ERR(host->cq_host));
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004630 host->cq_host = NULL;
4631 } else {
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004632 msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
Subhash Jadavania7a36b82015-10-16 18:33:25 -07004633 }
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004634}
4635#else
4636static void sdhci_msm_cmdq_init(struct sdhci_host *host,
4637 struct platform_device *pdev)
4638{
4639
4640}
4641#endif
4642
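/*
 * Decide whether this controller is the boot device by matching its
 * device name against the "androidboot.bootdevice=" kernel command-line
 * argument, e.g. a command line containing
 * "androidboot.bootdevice=<dev_name of this controller>". If the
 * argument is absent, the controller is assumed to be the boot device.
 */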
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004643static bool sdhci_msm_is_bootdevice(struct device *dev)
4644{
4645 if (strnstr(saved_command_line, "androidboot.bootdevice=",
4646 strlen(saved_command_line))) {
4647 char search_string[50];
4648
4649 snprintf(search_string, ARRAY_SIZE(search_string),
4650 "androidboot.bootdevice=%s", dev_name(dev));
4651 if (strnstr(saved_command_line, search_string,
4652 strlen(saved_command_line)))
4653 return true;
4654 else
4655 return false;
4656 }
4657
4658 /*
 4659	 * If the "androidboot.bootdevice=" argument is not present then
 4660	 * return true, as we don't know the boot device anyway.
4661 */
4662 return true;
4663}
4664
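/*
 * Probe sequence in brief: parse the DT platform data, set up the bus
 * voter/iface/core (and optional bus-aggregate, ICE, calibration and
 * sleep) clocks and regulators, map the core/TLMM registers, reset the
 * vendor-specific registers and enable SDHC mode, acknowledge and request
 * the power IRQ, set host capabilities and quirks, initialise CMDQ and
 * ICE when present, register the SDHCI host and finally create the sysfs
 * attributes.
 */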
Asutosh Das0ef24812012-12-18 16:14:02 +05304665static int sdhci_msm_probe(struct platform_device *pdev)
4666{
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304667 const struct sdhci_msm_offset *msm_host_offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304668 struct sdhci_host *host;
4669 struct sdhci_pltfm_host *pltfm_host;
4670 struct sdhci_msm_host *msm_host;
4671 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004672 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07004673 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004674 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05304675 struct resource *tlmm_memres = NULL;
4676 void __iomem *tlmm_mem;
Ritesh Harjani42876f42015-11-17 17:46:51 +05304677 unsigned long flags;
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004678 bool force_probe;
Asutosh Das0ef24812012-12-18 16:14:02 +05304679
4680 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
4681 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
4682 GFP_KERNEL);
4683 if (!msm_host) {
4684 ret = -ENOMEM;
4685 goto out;
4686 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304687
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304688 if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
4689 msm_host->mci_removed = true;
4690 msm_host->offset = &sdhci_msm_offset_mci_removed;
4691 } else {
4692 msm_host->mci_removed = false;
4693 msm_host->offset = &sdhci_msm_offset_mci_present;
4694 }
4695 msm_host_offset = msm_host->offset;
Asutosh Das0ef24812012-12-18 16:14:02 +05304696 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
4697 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
4698 if (IS_ERR(host)) {
4699 ret = PTR_ERR(host);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304700 goto out_host_free;
Asutosh Das0ef24812012-12-18 16:14:02 +05304701 }
4702
4703 pltfm_host = sdhci_priv(host);
4704 pltfm_host->priv = msm_host;
4705 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304706 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05304707
Asutosh Das1c43b132018-01-11 18:08:40 +05304708 ret = sdhci_msm_get_socrev(&pdev->dev, msm_host);
4709 if (ret == -EPROBE_DEFER) {
4710 dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
4711 goto pltfm_free;
4712 }
4713
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304714 /* get the ice device vops if present */
4715 ret = sdhci_msm_ice_get_dev(host);
4716 if (ret == -EPROBE_DEFER) {
4717 /*
 4718		 * The SDHCI driver might be probed before the ICE driver is.
 4719		 * In that case we return -EPROBE_DEFER in order to delay
 4720		 * this probe.
4721 */
4722 dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
4723 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004724 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304725
4726 } else if (ret == -ENODEV) {
4727 /*
 4728		 * The ICE device is not enabled in the DTS file. No further
 4729		 * ICE driver initialization is needed.
4730 */
4731 dev_warn(&pdev->dev, "%s: ICE device is not enabled",
4732 __func__);
4733 } else if (ret) {
4734 dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
4735 __func__, ret);
Venkat Gopalakrishnan94e408d2015-06-15 16:49:29 -07004736 goto pltfm_free;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304737 }
4738
Asutosh Das0ef24812012-12-18 16:14:02 +05304739 /* Extract platform data */
4740 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004741 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
Pavan Anamulaf2dda062016-03-30 22:07:56 +05304742 if (ret <= 0) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004743 dev_err(&pdev->dev, "Failed to get slot index %d\n",
4744 ret);
4745 goto pltfm_free;
4746 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004747
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004748 /* Read property to determine if the probe is forced */
4749 force_probe = of_find_property(pdev->dev.of_node,
4750 "qcom,force-sdhc1-probe", NULL);
4751
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004752 /* skip the probe if eMMC isn't a boot device */
Gustavo Solaira46578dc22017-08-18 11:18:00 -07004753 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
4754 && !force_probe) {
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004755 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004756 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07004757 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07004758
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07004759 if (disable_slots & (1 << (ret - 1))) {
4760 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
4761 ret);
4762 ret = -ENODEV;
4763 goto pltfm_free;
4764 }
4765
Sayali Lokhande5f768322016-04-11 18:36:53 +05304766 if (ret <= 2)
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07004767 sdhci_slot[ret-1] = msm_host;
4768
Dov Levenglickc9033ab2015-03-10 16:00:56 +02004769 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
4770 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304771 if (!msm_host->pdata) {
4772 dev_err(&pdev->dev, "DT parsing error\n");
4773 goto pltfm_free;
4774 }
4775 } else {
4776 dev_err(&pdev->dev, "No device tree node\n");
4777 goto pltfm_free;
4778 }
4779
4780 /* Setup Clocks */
4781
4782 /* Setup SDCC bus voter clock. */
4783 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
4784 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
4785 /* Vote for max. clk rate for max. performance */
4786 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
4787 if (ret)
4788 goto pltfm_free;
4789 ret = clk_prepare_enable(msm_host->bus_clk);
4790 if (ret)
4791 goto pltfm_free;
4792 }
4793
4794 /* Setup main peripheral bus clock */
4795 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
4796 if (!IS_ERR(msm_host->pclk)) {
4797 ret = clk_prepare_enable(msm_host->pclk);
4798 if (ret)
4799 goto bus_clk_disable;
4800 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05304801 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05304802
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304803 /* Setup SDC ufs bus aggr clock */
4804 msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
4805 if (!IS_ERR(msm_host->bus_aggr_clk)) {
4806 ret = clk_prepare_enable(msm_host->bus_aggr_clk);
4807 if (ret) {
4808 dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
4809 goto pclk_disable;
4810 }
4811 }
4812
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304813 if (msm_host->ice.pdev) {
4814 /* Setup SDC ICE clock */
4815 msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
4816 if (!IS_ERR(msm_host->ice_clk)) {
4817 /* ICE core has only one clock frequency for now */
4818 ret = clk_set_rate(msm_host->ice_clk,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304819 msm_host->pdata->ice_clk_max);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304820 if (ret) {
4821 dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
4822 ret,
Sahitya Tummala073ca552015-08-06 13:59:37 +05304823 msm_host->pdata->ice_clk_max);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304824 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304825 }
4826 ret = clk_prepare_enable(msm_host->ice_clk);
4827 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304828 goto bus_aggr_clk_disable;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304829
4830 msm_host->ice_clk_rate =
Sahitya Tummala073ca552015-08-06 13:59:37 +05304831 msm_host->pdata->ice_clk_max;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05304832 }
4833 }
4834
Asutosh Das0ef24812012-12-18 16:14:02 +05304835 /* Setup SDC MMC clock */
4836 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
4837 if (IS_ERR(msm_host->clk)) {
4838 ret = PTR_ERR(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304839 goto bus_aggr_clk_disable;
Asutosh Das0ef24812012-12-18 16:14:02 +05304840 }
4841
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304842 /* Set to the minimum supported clock frequency */
4843 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
4844 if (ret) {
4845 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304846 goto bus_aggr_clk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304847 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304848 ret = clk_prepare_enable(msm_host->clk);
4849 if (ret)
Vijay Viswanath674aeda2017-10-07 14:28:15 +05304850 goto bus_aggr_clk_disable;
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05304851
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304852 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304853 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304854
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004855 /* Setup CDC calibration fixed feedback clock */
4856 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
4857 if (!IS_ERR(msm_host->ff_clk)) {
4858 ret = clk_prepare_enable(msm_host->ff_clk);
4859 if (ret)
4860 goto clk_disable;
4861 }
4862
4863 /* Setup CDC calibration sleep clock */
4864 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
4865 if (!IS_ERR(msm_host->sleep_clk)) {
4866 ret = clk_prepare_enable(msm_host->sleep_clk);
4867 if (ret)
4868 goto ff_clk_disable;
4869 }
4870
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07004871 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
4872
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304873 ret = sdhci_msm_bus_register(msm_host, pdev);
4874 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004875 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304876
4877 if (msm_host->msm_bus_vote.client_handle)
4878 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
4879 sdhci_msm_bus_work);
4880 sdhci_msm_bus_voting(host, 1);
4881
Asutosh Das0ef24812012-12-18 16:14:02 +05304882 /* Setup regulators */
4883 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
4884 if (ret) {
4885 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304886 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05304887 }
4888
4889 /* Reset the core and Enable SDHC mode */
4890 core_memres = platform_get_resource_byname(pdev,
4891 IORESOURCE_MEM, "core_mem");
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304892 if (!msm_host->mci_removed) {
4893 if (!core_memres) {
4894 dev_err(&pdev->dev, "Failed to get iomem resource\n");
4895 goto vreg_deinit;
4896 }
4897 msm_host->core_mem = devm_ioremap(&pdev->dev,
4898 core_memres->start, resource_size(core_memres));
Asutosh Das0ef24812012-12-18 16:14:02 +05304899
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304900 if (!msm_host->core_mem) {
4901 dev_err(&pdev->dev, "Failed to remap registers\n");
4902 ret = -ENOMEM;
4903 goto vreg_deinit;
4904 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304905 }
4906
Sahitya Tummala079ed852015-10-29 20:18:45 +05304907 tlmm_memres = platform_get_resource_byname(pdev,
4908 IORESOURCE_MEM, "tlmm_mem");
4909 if (tlmm_memres) {
4910 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
4911 resource_size(tlmm_memres));
4912
4913 if (!tlmm_mem) {
4914 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
4915 ret = -ENOMEM;
4916 goto vreg_deinit;
4917 }
4918 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
Sahitya Tummala079ed852015-10-29 20:18:45 +05304919 }
4920
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304921 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004922	 * Reset the vendor spec register to its power-on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304923 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07004924 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304925 host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05304926
Veerabhadrarao Badiganti6b495d42017-09-12 14:41:39 +05304927 /*
4928 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
4929 */
4930 writel_relaxed((readl_relaxed(host->ioaddr +
4931 msm_host_offset->CORE_VENDOR_SPEC3) &
4932 ~CORE_FIFO_ALT_EN), host->ioaddr +
4933 msm_host_offset->CORE_VENDOR_SPEC3);
4934
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304935 if (!msm_host->mci_removed) {
4936 /* Set HC_MODE_EN bit in HC_MODE register */
4937 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
Asutosh Das0ef24812012-12-18 16:14:02 +05304938
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304939 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
4940 writel_relaxed(readl_relaxed(msm_host->core_mem +
4941 CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
4942 msm_host->core_mem + CORE_HC_MODE);
4943 }
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05304944 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07004945
4946 /*
 4947	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
4948 * be used as required later on.
4949 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304950 writel_relaxed((readl_relaxed(host->ioaddr +
4951 msm_host_offset->CORE_VENDOR_SPEC) |
4952 CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
4953 msm_host_offset->CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05304954 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05304955 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
4956 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
4957 * interrupt in GIC (by registering the interrupt handler), we need to
 4958	 * ensure that any pending power irq interrupt status is acknowledged;
 4959	 * otherwise the power irq interrupt handler would fire prematurely.
4960 */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304961 irq_status = sdhci_msm_readl_relaxed(host,
4962 msm_host_offset->CORE_PWRCTL_STATUS);
4963 sdhci_msm_writel_relaxed(irq_status, host,
4964 msm_host_offset->CORE_PWRCTL_CLEAR);
4965 irq_ctl = sdhci_msm_readl_relaxed(host,
4966 msm_host_offset->CORE_PWRCTL_CTL);
4967
Subhash Jadavani28137342013-05-14 17:46:43 +05304968 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
4969 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
4970 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
4971 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05304972 sdhci_msm_writel_relaxed(irq_ctl, host,
4973 msm_host_offset->CORE_PWRCTL_CTL);
Krishna Konda46fd1432014-10-30 21:13:27 -07004974
Subhash Jadavani28137342013-05-14 17:46:43 +05304975 /*
 4976	 * Ensure that the above writes are propagated before interrupt enablement
4977 * in GIC.
4978 */
4979 mb();
4980
4981 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05304982 * Following are the deviations from SDHC spec v3.0 -
4983 * 1. Card detection is handled using separate GPIO.
4984 * 2. Bus power control is handled by interacting with PMIC.
4985 */
4986 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
4987 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304988 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03004989 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05304990 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05304991 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05304992 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05304993 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Sahitya Tummala43fb3372016-04-05 14:00:48 +05304994 host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
Sahitya Tummaladb5e53d2016-04-05 15:29:35 +05304995 host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
Asutosh Das0ef24812012-12-18 16:14:02 +05304996
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05304997 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
4998 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
4999
Stephen Boyd8dce5c62013-04-24 14:19:46 -07005000 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07005001 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
5002 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
5003 SDHCI_VENDOR_VER_SHIFT));
5004 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
5005 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
5006 /*
5007 * Add 40us delay in interrupt handler when
5008 * operating at initialization frequency(400KHz).
5009 */
5010 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
5011 /*
5012 * Set Software Reset for DAT line in Software
5013 * Reset Register (Bit 2).
5014 */
5015 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
5016 }
5017
Asutosh Das214b9662013-06-13 14:27:42 +05305018 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
5019
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07005020 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005021 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
5022 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05305023 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005024 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05305025 goto vreg_deinit;
5026 }
Subhash Jadavanide139e82017-09-27 11:04:40 +05305027
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005028 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05305029 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07005030 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305031 if (ret) {
5032 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02005033 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05305034 goto vreg_deinit;
5035 }
5036
5037 /* Enable pwr irq interrupts */
Sayali Lokhandef0ee1ff2016-08-25 20:46:01 +05305038 sdhci_msm_writel_relaxed(INT_MASK, host,
5039 msm_host_offset->CORE_PWRCTL_MASK);
Asutosh Das0ef24812012-12-18 16:14:02 +05305040
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305041#ifdef CONFIG_MMC_CLKGATE
5042 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
5043 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
5044#endif
5045
Asutosh Das0ef24812012-12-18 16:14:02 +05305046 /* Set host capabilities */
5047 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
5048 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005049 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05305050 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05305051 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005052 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08005053 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03005054 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05305055 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07005056 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03005057 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305058 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
Asutosh Das0ef24812012-12-18 16:14:02 +05305059
5060 if (msm_host->pdata->nonremovable)
5061 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
5062
Guoping Yuf7c91332014-08-20 16:56:18 +08005063 if (msm_host->pdata->nonhotplug)
5064 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
5065
Subhash Jadavani7ae9c2c2017-03-31 16:50:59 -07005066 msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
5067
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305068 /* Initialize ICE if present */
5069 if (msm_host->ice.pdev) {
5070 ret = sdhci_msm_ice_init(host);
5071 if (ret) {
 5072			dev_err(&pdev->dev, "%s: SDHCI ICE init failed (%d)\n",
5073 mmc_hostname(host->mmc), ret);
5074 ret = -EINVAL;
5075 goto vreg_deinit;
5076 }
5077 host->is_crypto_en = true;
Veerabhadrarao Badigantife3088f2018-05-22 11:48:01 +05305078 msm_host->mmc->inlinecrypt_support = true;
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305079 /* Packed commands cannot be encrypted/decrypted using ICE */
5080 msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
5081 MMC_CAP2_PACKED_WR_CONTROL);
5082 }
5083
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05305084 init_completion(&msm_host->pwr_irq_completion);
5085
Sahitya Tummala581df132013-03-12 14:57:46 +05305086 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05305087 /*
5088 * Set up the card detect GPIO in active configuration before
5089 * configuring it as an IRQ. Otherwise, it can be in some
 5090		 * weird/inconsistent state, resulting in a flood of interrupts.
5091 */
5092 sdhci_msm_setup_pins(msm_host->pdata, true);
5093
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05305094 /*
5095 * This delay is needed for stabilizing the card detect GPIO
5096 * line after changing the pull configs.
5097 */
5098 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05305099 ret = mmc_gpio_request_cd(msm_host->mmc,
5100 msm_host->pdata->status_gpio, 0);
5101 if (ret) {
5102 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
5103 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305104 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05305105 }
5106 }
5107
Krishna Konda7feab352013-09-17 23:55:40 -07005108 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
5109 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
5110 host->dma_mask = DMA_BIT_MASK(64);
5111 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305112 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07005113 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305114 host->dma_mask = DMA_BIT_MASK(32);
5115 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05305116 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05305117 } else {
5118 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
5119 }
5120
Ritesh Harjani42876f42015-11-17 17:46:51 +05305121 msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
5122 "sdiowakeup_irq");
Ritesh Harjani42876f42015-11-17 17:46:51 +05305123 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305124 dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
5125 msm_host->pdata->sdiowakeup_irq);
Ritesh Harjani42876f42015-11-17 17:46:51 +05305126 msm_host->is_sdiowakeup_enabled = true;
5127 ret = request_irq(msm_host->pdata->sdiowakeup_irq,
5128 sdhci_msm_sdiowakeup_irq,
5129 IRQF_SHARED | IRQF_TRIGGER_HIGH,
5130 "sdhci-msm sdiowakeup", host);
5131 if (ret) {
5132 dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
5133 __func__, msm_host->pdata->sdiowakeup_irq, ret);
5134 msm_host->pdata->sdiowakeup_irq = -1;
5135 msm_host->is_sdiowakeup_enabled = false;
5136 goto vreg_deinit;
5137 } else {
5138 spin_lock_irqsave(&host->lock, flags);
5139 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305140 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305141 spin_unlock_irqrestore(&host->lock, flags);
5142 }
5143 }
5144
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07005145 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305146 ret = sdhci_add_host(host);
5147 if (ret) {
5148 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05305149 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05305150 }
5151
Veerabhadrarao Badigantieed1dea2017-06-21 19:27:32 +05305152 msm_host->pltfm_init_done = true;
5153
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005154 pm_runtime_set_active(&pdev->dev);
5155 pm_runtime_enable(&pdev->dev);
5156 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
5157 pm_runtime_use_autosuspend(&pdev->dev);
5158
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305159 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
5160 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
5161 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
5162 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
5163 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
5164 ret = device_create_file(&pdev->dev,
5165 &msm_host->msm_bus_vote.max_bus_bw);
5166 if (ret)
5167 goto remove_host;
5168
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305169 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
5170 msm_host->polling.show = show_polling;
5171 msm_host->polling.store = store_polling;
5172 sysfs_attr_init(&msm_host->polling.attr);
5173 msm_host->polling.attr.name = "polling";
5174 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
5175 ret = device_create_file(&pdev->dev, &msm_host->polling);
5176 if (ret)
5177 goto remove_max_bus_bw_file;
5178 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05305179
5180 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
5181 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
5182 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
5183 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
5184 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
5185 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5186 if (ret) {
5187 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
5188 mmc_hostname(host->mmc), __func__, ret);
5189 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
5190 }
Ankit Jain1d7e5182017-09-20 11:55:38 +05305191 if (sdhci_msm_is_bootdevice(&pdev->dev))
5192 mmc_flush_detect_work(host->mmc);
5193
Asutosh Das0ef24812012-12-18 16:14:02 +05305194 /* Successful initialization */
5195 goto out;
5196
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305197remove_max_bus_bw_file:
5198 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05305199remove_host:
5200 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005201 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05305202 sdhci_remove_host(host, dead);
5203vreg_deinit:
5204 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05305205bus_unregister:
5206 if (msm_host->msm_bus_vote.client_handle)
5207 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5208 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07005209sleep_clk_disable:
5210 if (!IS_ERR(msm_host->sleep_clk))
5211 clk_disable_unprepare(msm_host->sleep_clk);
5212ff_clk_disable:
5213 if (!IS_ERR(msm_host->ff_clk))
5214 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305215clk_disable:
5216 if (!IS_ERR(msm_host->clk))
5217 clk_disable_unprepare(msm_host->clk);
Vijay Viswanath674aeda2017-10-07 14:28:15 +05305218bus_aggr_clk_disable:
5219 if (!IS_ERR(msm_host->bus_aggr_clk))
5220 clk_disable_unprepare(msm_host->bus_aggr_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05305221pclk_disable:
5222 if (!IS_ERR(msm_host->pclk))
5223 clk_disable_unprepare(msm_host->pclk);
5224bus_clk_disable:
5225 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
5226 clk_disable_unprepare(msm_host->bus_clk);
5227pltfm_free:
5228 sdhci_pltfm_free(pdev);
Sahitya Tummala9325fb02015-05-08 11:53:29 +05305229out_host_free:
5230 devm_kfree(&pdev->dev, msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05305231out:
5232 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
5233 return ret;
5234}
5235
5236static int sdhci_msm_remove(struct platform_device *pdev)
5237{
5238 struct sdhci_host *host = platform_get_drvdata(pdev);
5239 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5240 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5241 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305242 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
5243 int i;
Asutosh Das0ef24812012-12-18 16:14:02 +05305244 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
5245 0xffffffff);
5246
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305247 pr_debug("%s: %s Enter\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05305248 if (!gpio_is_valid(msm_host->pdata->status_gpio))
5249 device_remove_file(&pdev->dev, &msm_host->polling);
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305250
5251 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305252 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005253 pm_runtime_disable(&pdev->dev);
Vijay Viswanath1971d222018-03-01 12:01:47 +05305254
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305255 if (msm_host->pm_qos_group_enable) {
5256 struct sdhci_msm_pm_qos_group *group;
5257
5258 for (i = 0; i < nr_groups; i++)
5259 cancel_delayed_work_sync(
5260 &msm_host->pm_qos[i].unvote_work);
5261
5262 device_remove_file(&msm_host->pdev->dev,
5263 &msm_host->pm_qos_group_enable_attr);
5264 device_remove_file(&msm_host->pdev->dev,
5265 &msm_host->pm_qos_group_status_attr);
5266
5267 for (i = 0; i < nr_groups; i++) {
5268 group = &msm_host->pm_qos[i];
5269 pm_qos_remove_request(&group->req);
5270 }
5271 }
5272
5273 if (msm_host->pm_qos_irq.enabled) {
5274 cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
5275 device_remove_file(&pdev->dev,
5276 &msm_host->pm_qos_irq.enable_attr);
5277 device_remove_file(&pdev->dev,
5278 &msm_host->pm_qos_irq.status_attr);
5279 pm_qos_remove_request(&msm_host->pm_qos_irq.req);
5280 }
5281
Vijay Viswanath1971d222018-03-01 12:01:47 +05305282 if (msm_host->pm_qos_wq)
5283 destroy_workqueue(msm_host->pm_qos_wq);
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305284
Asutosh Das0ef24812012-12-18 16:14:02 +05305285 sdhci_remove_host(host, dead);
Sahitya Tummala581df132013-03-12 14:57:46 +05305286
Asutosh Das0ef24812012-12-18 16:14:02 +05305287 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05305288
Pratibhasagar V9acf2642013-11-21 21:07:21 +05305289 sdhci_msm_setup_pins(pdata, true);
5290 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05305291
5292 if (msm_host->msm_bus_vote.client_handle) {
5293 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
5294 sdhci_msm_bus_unregister(msm_host);
5295 }
Vijay Viswanath44c37b72018-07-04 14:02:52 +05305296
5297 sdhci_pltfm_free(pdev);
5298
Asutosh Das0ef24812012-12-18 16:14:02 +05305299 return 0;
5300}
5301
Konstantin Dorfman98377d32015-02-25 10:09:41 +02005302#ifdef CONFIG_PM
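/*
 * Enable or disable the DAT1 SDIO wake-up interrupt around suspend. This
 * only applies when an SDIO card is present, a valid wake-up GPIO IRQ was
 * provided and the card requested MMC_PM_WAKE_SDIO_IRQ; otherwise it
 * returns 1 without touching the wake-up IRQ.
 */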
Ritesh Harjani42876f42015-11-17 17:46:51 +05305303static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
5304{
5305 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5306 struct sdhci_msm_host *msm_host = pltfm_host->priv;
5307 unsigned long flags;
5308 int ret = 0;
5309
5310 if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
5311 sdhci_is_valid_gpio_wakeup_int(msm_host) &&
5312 mmc_card_wake_sdio_irq(host->mmc))) {
Sahitya Tummala7cd1e422016-01-12 16:40:50 +05305313 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305314 return 1;
5315 }
5316
5317 spin_lock_irqsave(&host->lock, flags);
5318 if (enable) {
5319 /* configure DAT1 gpio if applicable */
5320 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305321 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305322 ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5323 if (!ret)
5324 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
5325 goto out;
5326 } else {
5327 pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5328 mmc_hostname(host->mmc), enable);
5329 }
5330 } else {
5331 if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
5332 ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
5333 sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
Ritesh Harjanib5c8e172015-12-17 19:59:04 +05305334 msm_host->sdio_pending_processing = false;
Ritesh Harjani42876f42015-11-17 17:46:51 +05305335 } else {
 5336			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
5337 mmc_hostname(host->mmc), enable);
5338
5339 }
5340 }
5341out:
5342 if (ret)
5343 pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
5344 mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
5345 ret, msm_host->pdata->sdiowakeup_irq);
5346 spin_unlock_irqrestore(&host->lock, flags);
5347 return ret;
5348}
5349
5350
static int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	ktime_t start = ktime_get();
	int ret;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		goto defer_disable_host_irq;

	sdhci_cfg_irq(host, false, true);

defer_disable_host_irq:
	disable_irq(msm_host->pwr_irq);

	/*
	 * Remove the vote immediately only if the clocks are off, in which
	 * case work queued to remove the vote might not complete before
	 * runtime suspend or system suspend.
	 */
	if (!atomic_read(&msm_host->clks_on)) {
		if (msm_host->msm_bus_vote.client_handle)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
	}

	if (host->is_crypto_en) {
		ret = sdhci_msm_ice_suspend(host);
		if (ret < 0)
			pr_err("%s: failed to suspend crypto engine %d\n",
					mmc_hostname(host->mmc), ret);
	}
	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}

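/*
 * Runtime resume: restore the ICE crypto engine (which requires the
 * controller clock), then re-enable the host interrupt unless an SDIO card
 * kept it enabled, and re-enable the power IRQ.
 */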
static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	ktime_t start = ktime_get();
	int ret;

	if (host->is_crypto_en) {
		ret = sdhci_msm_enable_controller_clock(host);
		if (ret) {
			pr_err("%s: failed to enable required clocks\n",
				mmc_hostname(host->mmc));
			goto skip_ice_resume;
		}
		ret = sdhci_msm_ice_resume(host);
		if (ret)
			pr_err("%s: failed to resume crypto engine %d\n",
					mmc_hostname(host->mmc), ret);
	}
skip_ice_resume:

	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		goto defer_enable_host_irq;

	sdhci_cfg_irq(host, true, true);

defer_enable_host_irq:
	enable_irq(msm_host->pwr_irq);

	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return 0;
}

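/*
 * System suspend: disable card-detect, runtime suspend the host if it is not
 * already runtime suspended, gate the controller clock and arm the SDIO DAT1
 * wakeup interrupt when applicable.
 */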
static int sdhci_msm_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
	    (msm_host->mmc->slot.cd_irq >= 0))
		disable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: already runtime suspended\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}
	ret = sdhci_msm_runtime_suspend(dev);
out:
	sdhci_msm_disable_controller_clock(host);
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
		if (sdio_cfg)
			sdhci_cfg_irq(host, false, true);
	}

	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}

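/*
 * System resume: re-enable card-detect, undo the runtime suspend performed
 * at system suspend (unless the host is still runtime suspended) and disarm
 * the SDIO DAT1 wakeup interrupt when applicable.
 */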
static int sdhci_msm_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;
	int sdio_cfg = 0;
	ktime_t start = ktime_get();

	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
	    (msm_host->mmc->slot.cd_irq >= 0))
		enable_irq(msm_host->mmc->slot.cd_irq);

	if (pm_runtime_suspended(dev)) {
		pr_debug("%s: %s: runtime suspended, defer system resume\n",
			mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = sdhci_msm_runtime_resume(dev);
out:
	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
		if (sdio_cfg)
			sdhci_cfg_irq(host, true, true);
	}

	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
			ktime_to_us(ktime_sub(ktime_get(), start)));
	return ret;
}

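/*
 * Late (noirq) suspend check: abort the suspend if the controller clocks are
 * still on or if SDIO wakeup handling is pending, so that it can be retried.
 */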
static int sdhci_msm_suspend_noirq(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int ret = 0;

	/*
	 * The SDIO IRQ thread (ksdioirqd) may still be running, so retry
	 * the suspend if the clocks are still on.
	 */
	if (atomic_read(&msm_host->clks_on)) {
		pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
			mmc_hostname(host->mmc), __func__);
		ret = -EAGAIN;
	}

	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
		if (msm_host->sdio_pending_processing)
			ret = -EBUSY;

	return ret;
}

static const struct dev_pm_ops sdhci_msm_pmops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
			   NULL)
	.suspend_noirq = sdhci_msm_suspend_noirq,
};

#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)

#else
#define SDHCI_MSM_PMOPS NULL
#endif

static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{.compatible = "qcom,sdhci-msm-v5"},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");