blob: 7de8486e91a1e2f21cd293424e1ccde07cc345a4 [file] [log] [blame]
Asutosh Das0ef24812012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
3 * driver source file
4 *
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -08005 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
Asutosh Das0ef24812012-12-18 16:14:02 +05306 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
Sahitya Tummala581df132013-03-12 14:57:46 +053034#include <linux/mmc/slot-gpio.h>
Sahitya Tummalaeaa21862013-03-20 19:34:59 +053035#include <linux/dma-mapping.h>
Sahitya Tummala66b0fe32013-04-25 11:50:56 +053036#include <linux/iopoll.h>
Pratibhasagar V9acf2642013-11-21 21:07:21 +053037#include <linux/pinctrl/consumer.h>
38#include <linux/iopoll.h>
Sahitya Tummala8a3e8182013-03-10 14:12:52 +053039#include <linux/msm-bus.h>
Konstantin Dorfman98377d32015-02-25 10:09:41 +020040#include <linux/pm_runtime.h>
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +020041#include <trace/events/mmc.h>
Asutosh Das0ef24812012-12-18 16:14:02 +053042
Sahitya Tummala56874732015-05-21 08:24:03 +053043#include "sdhci-msm.h"
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -070044#include "cmdq_hci.h"
Asutosh Das0ef24812012-12-18 16:14:02 +053045
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080046#define CORE_POWER 0x0
47#define CORE_SW_RST (1 << 7)
48
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -070049#define SDHCI_VER_100 0x2B
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080050#define CORE_MCI_DATA_CNT 0x30
51#define CORE_MCI_STATUS 0x34
52#define CORE_MCI_FIFO_CNT 0x44
53
54#define CORE_VERSION_STEP_MASK 0x0000FFFF
55#define CORE_VERSION_MINOR_MASK 0x0FFF0000
56#define CORE_VERSION_MINOR_SHIFT 16
57#define CORE_VERSION_MAJOR_MASK 0xF0000000
58#define CORE_VERSION_MAJOR_SHIFT 28
59#define CORE_VERSION_TARGET_MASK 0x000000FF
Konstantin Dorfman98543bf2015-10-01 17:56:54 +030060#define SDHCI_MSM_VER_420 0x49
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080061
62#define CORE_GENERICS 0x70
63#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +053064
65#define CORE_VERSION_MAJOR_MASK 0xF0000000
66#define CORE_VERSION_MAJOR_SHIFT 28
67
Asutosh Das0ef24812012-12-18 16:14:02 +053068#define CORE_HC_MODE 0x78
69#define HC_MODE_EN 0x1
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -070070#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das0ef24812012-12-18 16:14:02 +053071
Sahitya Tummala67717bc2013-08-02 09:21:37 +053072#define CORE_MCI_VERSION 0x050
73#define CORE_TESTBUS_CONFIG 0x0CC
74#define CORE_TESTBUS_ENA (1 << 3)
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -080075#define CORE_TESTBUS_SEL2_BIT 4
76#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
Sahitya Tummala67717bc2013-08-02 09:21:37 +053077
Asutosh Das0ef24812012-12-18 16:14:02 +053078#define CORE_PWRCTL_STATUS 0xDC
79#define CORE_PWRCTL_MASK 0xE0
80#define CORE_PWRCTL_CLEAR 0xE4
81#define CORE_PWRCTL_CTL 0xE8
82
83#define CORE_PWRCTL_BUS_OFF 0x01
84#define CORE_PWRCTL_BUS_ON (1 << 1)
85#define CORE_PWRCTL_IO_LOW (1 << 2)
86#define CORE_PWRCTL_IO_HIGH (1 << 3)
87
88#define CORE_PWRCTL_BUS_SUCCESS 0x01
89#define CORE_PWRCTL_BUS_FAIL (1 << 1)
90#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
91#define CORE_PWRCTL_IO_FAIL (1 << 3)
92
93#define INT_MASK 0xF
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070094#define MAX_PHASES 16
95
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -070096#define CORE_DLL_CONFIG 0x100
97#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -070098#define CORE_DLL_EN (1 << 16)
99#define CORE_CDR_EN (1 << 17)
100#define CORE_CK_OUT_EN (1 << 18)
101#define CORE_CDR_EXT_EN (1 << 19)
102#define CORE_DLL_PDN (1 << 29)
103#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700104
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700105#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700106#define CORE_DLL_LOCK (1 << 7)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700107#define CORE_DDR_DLL_LOCK (1 << 11)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700108
109#define CORE_VENDOR_SPEC 0x10C
Krishna Konda46fd1432014-10-30 21:13:27 -0700110#define CORE_CLK_PWRSAVE (1 << 1)
111#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
112#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
113#define CORE_HC_MCLK_SEL_MASK (3 << 8)
114#define CORE_HC_AUTO_CMD21_EN (1 << 6)
115#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700116#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700117#define CORE_HC_SELECT_IN_EN (1 << 18)
118#define CORE_HC_SELECT_IN_HS400 (6 << 19)
119#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -0700120#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700121
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800122#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
123#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
124
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530125#define CORE_VENDOR_SPEC_FUNC2 0x110
Pavan Anamula691dd592015-08-25 16:11:20 +0530126#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
127#define HC_SW_RST_REQ (1 << 21)
Sahitya Tummala2ba05ae2015-03-03 14:03:20 +0530128#define CORE_ONE_MID_EN (1 << 25)
129
Krishna Konda7feab352013-09-17 23:55:40 -0700130#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +0530131#define CORE_8_BIT_SUPPORT (1 << 18)
132#define CORE_3_3V_SUPPORT (1 << 24)
133#define CORE_3_0V_SUPPORT (1 << 25)
134#define CORE_1_8V_SUPPORT (1 << 26)
Gilad Broner2a10ca02014-10-02 17:20:35 +0300135#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
Krishna Konda7feab352013-09-17 23:55:40 -0700136
Venkat Gopalakrishnan0a92d532014-12-16 17:31:00 -0800137#define CORE_SDCC_DEBUG_REG 0x124
Sahitya Tummala67717bc2013-08-02 09:21:37 +0530138
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700139#define CORE_CSR_CDC_CTLR_CFG0 0x130
140#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
141#define CORE_HW_AUTOCAL_ENA (1 << 17)
142
143#define CORE_CSR_CDC_CTLR_CFG1 0x134
144#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
145#define CORE_TIMER_ENA (1 << 16)
146
147#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
148#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
149#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
150#define CORE_CDC_OFFSET_CFG 0x14C
151#define CORE_CSR_CDC_DELAY_CFG 0x150
152#define CORE_CDC_SLAVE_DDA_CFG 0x160
153#define CORE_CSR_CDC_STATUS0 0x164
154#define CORE_CALIBRATION_DONE (1 << 0)
155
156#define CORE_CDC_ERROR_CODE_MASK 0x7000000
157
Konstantin Dorfman98543bf2015-10-01 17:56:54 +0300158#define CQ_CMD_DBG_RAM 0x110
159#define CQ_CMD_DBG_RAM_WA 0x150
160#define CQ_CMD_DBG_RAM_OL 0x154
161
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700162#define CORE_CSR_CDC_GEN_CFG 0x178
163#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
164#define CORE_CDC_SWITCH_RC_EN (1 << 1)
165
166#define CORE_DDR_200_CFG 0x184
167#define CORE_CDC_T4_DLY_SEL (1 << 0)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530168#define CORE_CMDIN_RCLK_EN (1 << 1)
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700169#define CORE_START_CDC_TRAFFIC (1 << 6)
Ritesh Harjaniea709662015-05-27 15:40:24 +0530170
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700171#define CORE_VENDOR_SPEC3 0x1B0
172#define CORE_PWRSAVE_DLL (1 << 3)
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +0530173#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700174
175#define CORE_DLL_CONFIG_2 0x1B4
176#define CORE_DDR_CAL_EN (1 << 0)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800177#define CORE_FLL_CYCLE_CNT (1 << 18)
178#define CORE_DLL_CLOCK_DISABLE (1 << 21)
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700179
Pavan Anamulaf7bf5112015-08-21 18:09:42 +0530180#define CORE_DDR_CONFIG 0x1B8
181#define DDR_CONFIG_POR_VAL 0x80040853
182#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
183#define DDR_CONFIG_PRG_RCLK_DLY 115
Venkat Gopalakrishnanb47cf402015-09-04 18:32:25 -0700184#define CORE_DDR_CONFIG_2 0x1BC
185#define DDR_CONFIG_2_POR_VAL 0x80040873
Krishna Konda2faa7bb2014-06-04 01:25:16 -0700186
Venkat Gopalakrishnan450745e2014-07-24 20:39:34 -0700187/* 512 descriptors */
188#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +0530189#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das648f9d12013-01-10 21:11:04 +0530190
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700191#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
Venkat Gopalakrishnanc0a5c3a2015-02-03 16:10:07 -0800192#define TCXO_FREQ 19200000
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -0700193
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -0700194#define INVALID_TUNING_PHASE -1
195
Krishna Konda96e6b112013-10-28 15:25:03 -0700196#define NUM_TUNING_PHASES 16
Talel Shenhar6f0f3412015-11-08 14:21:31 +0200197#define MAX_DRV_TYPES_SUPPORTED_HS200 4
Konstantin Dorfman98377d32015-02-25 10:09:41 +0200198#define MSM_AUTOSUSPEND_DELAY_MS 100
Krishna Konda96e6b112013-10-28 15:25:03 -0700199
/*
 * Known tuning-block data pattern (64 bytes) compared against the data
 * returned by the card during tuning (see MMC_SEND_TUNING_BLOCK_HS200
 * usage below) to find a working sampling phase on a 4-bit bus.
 */
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

/*
 * 128-byte tuning-block pattern, presumably used for tuning on an
 * 8-bit bus (wider-bus variant of the pattern above) — the selecting
 * code is outside this chunk; confirm against the tuning routine.
 */
static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};
Asutosh Das0ef24812012-12-18 16:14:02 +0530217
/* global to hold each slot instance for debug */
static struct sdhci_msm_host *sdhci_slot[2];

/*
 * Module parameter: when non-zero, slot probing is disabled (consumers
 * of this flag are outside this chunk). Writable by root only.
 */
static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);
224
/*
 * Selector for which VDD-IO voltage to apply when changing the I/O pad
 * supply; interpreted by sdhci_msm_set_vdd_io_vol() (defined elsewhere
 * in this file).
 */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};
236
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700237/* MSM platform specific tuning */
238static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
239 u8 poll)
240{
241 int rc = 0;
242 u32 wait_cnt = 50;
243 u8 ck_out_en = 0;
244 struct mmc_host *mmc = host->mmc;
245
246 /* poll for CK_OUT_EN bit. max. poll time = 50us */
247 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
248 CORE_CK_OUT_EN);
249
250 while (ck_out_en != poll) {
251 if (--wait_cnt == 0) {
252 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
253 mmc_hostname(mmc), __func__, poll);
254 rc = -ETIMEDOUT;
255 goto out;
256 }
257 udelay(1);
258
259 ck_out_en = !!(readl_relaxed(host->ioaddr +
260 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
261 }
262out:
263 return rc;
264}
265
Asutosh Dase5e9ca62013-07-30 19:08:36 +0530266/*
267 * Enable CDR to track changes of DAT lines and adjust sampling
268 * point according to voltage/temperature variations
269 */
270static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
271{
272 int rc = 0;
273 u32 config;
274
275 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
276 config |= CORE_CDR_EN;
277 config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
278 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
279
280 rc = msm_dll_poll_ck_out_en(host, 0);
281 if (rc)
282 goto err;
283
284 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
285 CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
286
287 rc = msm_dll_poll_ck_out_en(host, 1);
288 if (rc)
289 goto err;
290 goto out;
291err:
292 pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
293out:
294 return rc;
295}
296
297static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
298 *attr, const char *buf, size_t count)
299{
300 struct sdhci_host *host = dev_get_drvdata(dev);
301 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
302 struct sdhci_msm_host *msm_host = pltfm_host->priv;
303 u32 tmp;
304 unsigned long flags;
305
306 if (!kstrtou32(buf, 0, &tmp)) {
307 spin_lock_irqsave(&host->lock, flags);
308 msm_host->en_auto_cmd21 = !!tmp;
309 spin_unlock_irqrestore(&host->lock, flags);
310 }
311 return count;
312}
313
314static ssize_t show_auto_cmd21(struct device *dev,
315 struct device_attribute *attr, char *buf)
316{
317 struct sdhci_host *host = dev_get_drvdata(dev);
318 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
319 struct sdhci_msm_host *msm_host = pltfm_host->priv;
320
321 return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
322}
323
324/* MSM auto-tuning handler */
325static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
326 bool enable,
327 u32 type)
328{
329 int rc = 0;
330 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
331 struct sdhci_msm_host *msm_host = pltfm_host->priv;
332 u32 val = 0;
333
334 if (!msm_host->en_auto_cmd21)
335 return 0;
336
337 if (type == MMC_SEND_TUNING_BLOCK_HS200)
338 val = CORE_HC_AUTO_CMD21_EN;
339 else
340 return 0;
341
342 if (enable) {
343 rc = msm_enable_cdr_cm_sdc4_dll(host);
344 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
345 val, host->ioaddr + CORE_VENDOR_SPEC);
346 } else {
347 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
348 ~val, host->ioaddr + CORE_VENDOR_SPEC);
349 }
350 return rc;
351}
352
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -0700353static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
354{
355 int rc = 0;
356 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
357 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
358 0x8};
359 unsigned long flags;
360 u32 config;
361 struct mmc_host *mmc = host->mmc;
362
363 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
364 spin_lock_irqsave(&host->lock, flags);
365
366 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
367 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
368 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
369 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
370
371 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
372 rc = msm_dll_poll_ck_out_en(host, 0);
373 if (rc)
374 goto err_out;
375
376 /*
377 * Write the selected DLL clock output phase (0 ... 15)
378 * to CDR_SELEXT bit field of DLL_CONFIG register.
379 */
380 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
381 & ~(0xF << 20))
382 | (grey_coded_phase_table[phase] << 20)),
383 host->ioaddr + CORE_DLL_CONFIG);
384
385 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
386 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
387 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
388
389 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
390 rc = msm_dll_poll_ck_out_en(host, 1);
391 if (rc)
392 goto err_out;
393
394 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
395 config |= CORE_CDR_EN;
396 config &= ~CORE_CDR_EXT_EN;
397 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
398 goto out;
399
400err_out:
401 pr_err("%s: %s: Failed to set DLL phase: %d\n",
402 mmc_hostname(mmc), __func__, phase);
403out:
404 spin_unlock_irqrestore(&host->lock, flags);
405 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
406 return rc;
407}
408
/*
 * Find out the greatest range of consecuitive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

/*
 * @phase_table: list of passing phases (ascending), @total_phases entries.
 * The list is split into rows of consecutive phases, rows containing
 * phase 0 and phase 15 are merged (the phase space is cyclic), and the
 * phase at the 3/4 point of the longest row is returned. Negative
 * -EINVAL is returned on bad input or if no valid phase can be chosen.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[r][c]: c-th phase of the r-th consecutive window */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	/* Partition phase_table into rows of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the longest window */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase at 3/4 of the winning window (0-based) */
	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}
522
523static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
524{
525 u32 mclk_freq = 0;
526
527 /* Program the MCLK value to MCLK_FREQ bit field */
528 if (host->clock <= 112000000)
529 mclk_freq = 0;
530 else if (host->clock <= 125000000)
531 mclk_freq = 1;
532 else if (host->clock <= 137000000)
533 mclk_freq = 2;
534 else if (host->clock <= 150000000)
535 mclk_freq = 3;
536 else if (host->clock <= 162000000)
537 mclk_freq = 4;
538 else if (host->clock <= 175000000)
539 mclk_freq = 5;
540 else if (host->clock <= 187000000)
541 mclk_freq = 6;
542 else if (host->clock <= 200000000)
543 mclk_freq = 7;
544
545 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
546 & ~(7 << 24)) | (mclk_freq << 24)),
547 host->ioaddr + CORE_DLL_CONFIG);
548}
549
/* Initialize the DLL (Programmable Delay Line ) */
/*
 * Reset/initialization sequence: with PWRSAVE forced off so the clock
 * keeps running, pulse DLL_RST and DLL_PDN, program the clock-frequency
 * field, enable the DLL and its output clock, then poll DLL_STATUS for
 * lock (up to ~50us). Hosts with use_updated_dll_reset additionally
 * gate the DLL clock around the reset and program the FLL cycle count.
 * The original PWRSAVE setting is restored on all exit paths.
 * Returns 0 on lock, -ETIMEDOUT otherwise. Runs under host->lock.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	if (msm_host->use_updated_dll_reset) {
		/* Disable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				& ~CORE_CK_OUT_EN),
				host->ioaddr + CORE_DLL_CONFIG);

		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				| CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_updated_dll_reset) {
		u32 mclk_freq = 0;

		/* FLL cycle count scales with the TCXO reference clock */
		if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& CORE_FLL_CYCLE_CNT))
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
		else
			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);

		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~(0xFF << 10)) | (mclk_freq << 10)),
				host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_updated_dll_reset) {
		msm_cm_dll_set_freq(host);
		/* Enable the DLL clock */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
				& ~CORE_DLL_CLOCK_DISABLE),
				host->ioaddr + CORE_DLL_CONFIG_2);
	}

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
			CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}
671
/*
 * CDCLP533 (Calibrated Delay Circuit) calibration for HS400: program
 * the CDC configuration registers, trigger a software full calibration
 * followed by hardware auto-calibration, poll for CALIBRATION_DONE
 * (up to 50us), check the error code, then start CDC traffic.
 * Returns 0 on success, -ETIMEDOUT or -EINVAL on failure.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 calib_done;
	int ret = 0;
	int cdc_err = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x4E2
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Ensure all the above writes complete before polling */
	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
777
/*
 * CM_DLL_SDC4 calibration for DDR/HS400: reprogram the DDR config
 * registers to their power-on defaults (in case the bootloader changed
 * them), optionally enable RCLK on the CMD line for enhanced strobe,
 * kick off DDR calibration via DLL_CONFIG_2 and poll DLL_STATUS for
 * DDR_DLL_LOCK (up to 1ms). Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u32 dll_status, ddr_config;
	int ret = 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Reprogramming the value in case it might have been modified by
	 * bootloaders.
	 */
	if (msm_host->rclk_delay_fix) {
		writel_relaxed(DDR_CONFIG_2_POR_VAL,
			host->ioaddr + CORE_DDR_CONFIG_2);
	} else {
		/* Override the PRG_RCLK_DLY field of the POR value */
		ddr_config = DDR_CONFIG_POR_VAL &
			~DDR_CONFIG_PRG_RCLK_DLY_MASK;
		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
		writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
	}

	/* Route RCLK onto the CMD input when enhanced strobe is in use */
	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
				| CORE_CMDIN_RCLK_EN),
				host->ioaddr + CORE_DDR_200_CFG);

	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
			| CORE_DDR_CAL_EN),
			host->ioaddr + CORE_DLL_CONFIG_2);

	/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
	ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
		dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll)
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
				| CORE_PWRSAVE_DLL),
				host->ioaddr + CORE_VENDOR_SPEC3);
	/* Ensure the register writes are observed before returning */
	mb();
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
839
Ritesh Harjaniea709662015-05-27 15:40:24 +0530840static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
841{
842 int ret = 0;
843 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
844 struct sdhci_msm_host *msm_host = pltfm_host->priv;
845 struct mmc_host *mmc = host->mmc;
846
847 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
848
Ritesh Harjani70e2a712015-08-25 11:34:16 +0530849 if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
850 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjaniea709662015-05-27 15:40:24 +0530851 mmc_hostname(mmc));
852 return -EINVAL;
853 }
854
855 if (msm_host->calibration_done ||
856 !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
857 return 0;
858 }
859
860 /*
861 * Reset the tuning block.
862 */
863 ret = msm_init_cm_dll(host);
864 if (ret)
865 goto out;
866
867 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
868out:
869 if (!ret)
870 msm_host->calibration_done = true;
871 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
872 __func__, ret);
873 return ret;
874}
875
/*
 * Re-establish the DLL state for HS400 without retuning.
 *
 * Retuning in HS400 would fail, so instead the tuning block is reset,
 * the previously saved tuning phase is restored, and the appropriate
 * DLL (CDCLP533 or CM_DLL_SDC4, depending on the controller) is
 * recalibrated.
 *
 * Returns 0 on success, negative errno from any of the steps.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	int ret = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_cdclp533)
		/* Calibrate CDCLP533 DLL HW */
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		/* Calibrate CM_DLL_SDC4 HW */
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}
913
Krishna Konda96e6b112013-10-28 15:25:03 -0700914static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
915 u8 drv_type)
916{
917 struct mmc_command cmd = {0};
918 struct mmc_request mrq = {NULL};
919 struct mmc_host *mmc = host->mmc;
920 u8 val = ((drv_type << 4) | 2);
921
922 cmd.opcode = MMC_SWITCH;
923 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
924 (EXT_CSD_HS_TIMING << 16) |
925 (val << 8) |
926 EXT_CSD_CMD_SET_NORMAL;
927 cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
928 /* 1 sec */
929 cmd.busy_timeout = 1000 * 1000;
930
931 memset(cmd.resp, 0, sizeof(cmd.resp));
932 cmd.retries = 3;
933
934 mrq.cmd = &cmd;
935 cmd.data = NULL;
936
937 mmc_wait_for_req(mmc, &mrq);
938 pr_debug("%s: %s: set card drive type to %d\n",
939 mmc_hostname(mmc), __func__,
940 drv_type);
941}
942
/*
 * Execute the SDCC tuning sequence.
 *
 * Sweeps all 16 DLL phases, issuing the tuning command at each and
 * recording the phases whose returned data matches the expected tuning
 * block pattern. The "most appropriate" passing phase is then
 * programmed into the delay line and cached in saved_tuning_phase.
 * If every phase passes on an eMMC card, the card's drive strength is
 * varied and tuning repeated until at least one phase fails. The whole
 * sequence is retried up to 3 times before giving up with -EIO.
 *
 * For HS400, once tuning is done only the DLL calibration is redone.
 *
 * Returns 0 on success (or when tuning is not required for the current
 * timing/clock), negative errno otherwise.
 */
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;	/* full-sequence retries left */
	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u8 drv_type = 0;
	bool drv_type_changed = false;
	struct mmc_card *card = host->mmc->card;
	int sts_retry;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode*/
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_hs400_dll_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	/* 8-bit HS200 uses the larger 128-byte tuning pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	tuned_phase_cnt = 0;

	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;
		struct mmc_command sts_cmd = {0};

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		/*
		 * On a failed attempt, poll CMD13 until the card is back
		 * in TRAN state before trying the next phase.
		 */
		if (card && (cmd.error || data.error)) {
			sts_cmd.opcode = MMC_SEND_STATUS;
			sts_cmd.arg = card->rca << 16;
			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
			sts_retry = 5;
			while (sts_retry) {
				mmc_wait_for_cmd(mmc, &sts_cmd, 0);

				if (sts_cmd.error ||
				    (R1_CURRENT_STATE(sts_cmd.resp[0])
				    != R1_STATE_TRAN)) {
					sts_retry--;
					/*
					 * wait for at least 146 MCLK cycles for
					 * the card to move to TRANS state. As
					 * the MCLK would be min 200MHz for
					 * tuning, we need max 0.73us delay. To
					 * be on safer side 1ms delay is given.
					 */
					usleep_range(1000, 1200);
					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
						mmc_hostname(mmc), phase,
						sts_cmd.error, sts_cmd.resp[0]);
					continue;
				}
				break;
			};
		}

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found *** good *** phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		} else {
			pr_debug("%s: %s: found ## bad ## phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
			card && mmc_card_mmc(card)) {
		/*
		 * If all phases pass then its a problem. So change the card's
		 * drive type to a different value, if supported and repeat
		 * tuning until at least one phase fails. Then set the original
		 * drive type back.
		 *
		 * If all the phases still pass after trying all possible
		 * drive types, then one of those 16 phases will be picked.
		 * This is no different from what was going on before the
		 * modification to change drive type and retune.
		 */
		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
				tuned_phase_cnt);

		/* set drive type to other value . default setting is 0x0 */
		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
			pr_debug("%s: trying different drive strength (%d)\n",
				mmc_hostname(mmc), drv_type);
			if (card->ext_csd.raw_driver_strength &
					(1 << drv_type)) {
				sdhci_msm_set_mmc_drv_type(host, opcode,
						drv_type);
				if (!drv_type_changed)
					drv_type_changed = true;
				goto retry;
			}
		}
	}

	/* reset drive type to default (50 ohm) if changed */
	if (drv_type_changed)
		sdhci_msm_set_mmc_drv_type(host, opcode, 0);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		/* Remember the phase so HS400 can restore it without retuning */
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}
1149
Asutosh Das0ef24812012-12-18 16:14:02 +05301150static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
1151{
1152 struct sdhci_msm_gpio_data *curr;
1153 int i, ret = 0;
1154
1155 curr = pdata->pin_data->gpio_data;
1156 for (i = 0; i < curr->size; i++) {
1157 if (!gpio_is_valid(curr->gpio[i].no)) {
1158 ret = -EINVAL;
1159 pr_err("%s: Invalid gpio = %d\n", __func__,
1160 curr->gpio[i].no);
1161 goto free_gpios;
1162 }
1163 if (enable) {
1164 ret = gpio_request(curr->gpio[i].no,
1165 curr->gpio[i].name);
1166 if (ret) {
1167 pr_err("%s: gpio_request(%d, %s) failed %d\n",
1168 __func__, curr->gpio[i].no,
1169 curr->gpio[i].name, ret);
1170 goto free_gpios;
1171 }
1172 curr->gpio[i].is_enabled = true;
1173 } else {
1174 gpio_free(curr->gpio[i].no);
1175 curr->gpio[i].is_enabled = false;
1176 }
1177 }
1178 return ret;
1179
1180free_gpios:
1181 for (i--; i >= 0; i--) {
1182 gpio_free(curr->gpio[i].no);
1183 curr->gpio[i].is_enabled = false;
1184 }
1185 return ret;
1186}
1187
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301188static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
1189 bool enable)
1190{
1191 int ret = 0;
1192
1193 if (enable)
1194 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1195 pdata->pctrl_data->pins_active);
1196 else
1197 ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
1198 pdata->pctrl_data->pins_sleep);
1199
1200 if (ret < 0)
1201 pr_err("%s state for pinctrl failed with %d\n",
1202 enable ? "Enabling" : "Disabling", ret);
1203
1204 return ret;
1205}
1206
Asutosh Das0ef24812012-12-18 16:14:02 +05301207static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
1208{
1209 int ret = 0;
1210
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301211 if (pdata->pin_cfg_sts == enable) {
Asutosh Das0ef24812012-12-18 16:14:02 +05301212 return 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301213 } else if (pdata->pctrl_data) {
1214 ret = sdhci_msm_setup_pinctrl(pdata, enable);
1215 goto out;
1216 } else if (!pdata->pin_data) {
1217 return 0;
1218 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301219
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301220 if (pdata->pin_data->is_gpio)
1221 ret = sdhci_msm_setup_gpio(pdata, enable);
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301222out:
Asutosh Das0ef24812012-12-18 16:14:02 +05301223 if (!ret)
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301224 pdata->pin_cfg_sts = enable;
Asutosh Das0ef24812012-12-18 16:14:02 +05301225
1226 return ret;
1227}
1228
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301229static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
1230 u32 **out, int *len, u32 size)
1231{
1232 int ret = 0;
1233 struct device_node *np = dev->of_node;
1234 size_t sz;
1235 u32 *arr = NULL;
1236
1237 if (!of_get_property(np, prop_name, len)) {
1238 ret = -EINVAL;
1239 goto out;
1240 }
1241 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001242 if (sz <= 0 || (size > 0 && (sz > size))) {
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301243 dev_err(dev, "%s invalid size\n", prop_name);
1244 ret = -EINVAL;
1245 goto out;
1246 }
1247
1248 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1249 if (!arr) {
1250 dev_err(dev, "%s failed allocating memory\n", prop_name);
1251 ret = -ENOMEM;
1252 goto out;
1253 }
1254
1255 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1256 if (ret < 0) {
1257 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1258 goto out;
1259 }
1260 *out = arr;
1261out:
1262 if (ret)
1263 *len = 0;
1264 return ret;
1265}
1266
Asutosh Das0ef24812012-12-18 16:14:02 +05301267#define MAX_PROP_SIZE 32
1268static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1269 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1270{
1271 int len, ret = 0;
1272 const __be32 *prop;
1273 char prop_name[MAX_PROP_SIZE];
1274 struct sdhci_msm_reg_data *vreg;
1275 struct device_node *np = dev->of_node;
1276
1277 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1278 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301279 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das0ef24812012-12-18 16:14:02 +05301280 return ret;
1281 }
1282
1283 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1284 if (!vreg) {
1285 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1286 ret = -ENOMEM;
1287 return ret;
1288 }
1289
1290 vreg->name = vreg_name;
1291
1292 snprintf(prop_name, MAX_PROP_SIZE,
1293 "qcom,%s-always-on", vreg_name);
1294 if (of_get_property(np, prop_name, NULL))
1295 vreg->is_always_on = true;
1296
1297 snprintf(prop_name, MAX_PROP_SIZE,
1298 "qcom,%s-lpm-sup", vreg_name);
1299 if (of_get_property(np, prop_name, NULL))
1300 vreg->lpm_sup = true;
1301
1302 snprintf(prop_name, MAX_PROP_SIZE,
1303 "qcom,%s-voltage-level", vreg_name);
1304 prop = of_get_property(np, prop_name, &len);
1305 if (!prop || (len != (2 * sizeof(__be32)))) {
1306 dev_warn(dev, "%s %s property\n",
1307 prop ? "invalid format" : "no", prop_name);
1308 } else {
1309 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1310 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1311 }
1312
1313 snprintf(prop_name, MAX_PROP_SIZE,
1314 "qcom,%s-current-level", vreg_name);
1315 prop = of_get_property(np, prop_name, &len);
1316 if (!prop || (len != (2 * sizeof(__be32)))) {
1317 dev_warn(dev, "%s %s property\n",
1318 prop ? "invalid format" : "no", prop_name);
1319 } else {
1320 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1321 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1322 }
1323
1324 *vreg_data = vreg;
1325 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1326 vreg->name, vreg->is_always_on ? "always_on," : "",
1327 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1328 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1329
1330 return ret;
1331}
1332
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301333static int sdhci_msm_parse_pinctrl_info(struct device *dev,
1334 struct sdhci_msm_pltfm_data *pdata)
1335{
1336 struct sdhci_pinctrl_data *pctrl_data;
1337 struct pinctrl *pctrl;
1338 int ret = 0;
1339
1340 /* Try to obtain pinctrl handle */
1341 pctrl = devm_pinctrl_get(dev);
1342 if (IS_ERR(pctrl)) {
1343 ret = PTR_ERR(pctrl);
1344 goto out;
1345 }
1346 pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
1347 if (!pctrl_data) {
1348 dev_err(dev, "No memory for sdhci_pinctrl_data\n");
1349 ret = -ENOMEM;
1350 goto out;
1351 }
1352 pctrl_data->pctrl = pctrl;
1353 /* Look-up and keep the states handy to be used later */
1354 pctrl_data->pins_active = pinctrl_lookup_state(
1355 pctrl_data->pctrl, "active");
1356 if (IS_ERR(pctrl_data->pins_active)) {
1357 ret = PTR_ERR(pctrl_data->pins_active);
1358 dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
1359 goto out;
1360 }
1361 pctrl_data->pins_sleep = pinctrl_lookup_state(
1362 pctrl_data->pctrl, "sleep");
1363 if (IS_ERR(pctrl_data->pins_sleep)) {
1364 ret = PTR_ERR(pctrl_data->pins_sleep);
1365 dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
1366 goto out;
1367 }
1368 pdata->pctrl_data = pctrl_data;
1369out:
1370 return ret;
1371}
1372
Asutosh Das0ef24812012-12-18 16:14:02 +05301373#define GPIO_NAME_MAX_LEN 32
1374static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1375 struct sdhci_msm_pltfm_data *pdata)
1376{
1377 int ret = 0, cnt, i;
1378 struct sdhci_msm_pin_data *pin_data;
1379 struct device_node *np = dev->of_node;
1380
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301381 ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
1382 if (!ret) {
1383 goto out;
1384 } else if (ret == -EPROBE_DEFER) {
1385 dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
1386 goto out;
1387 } else {
1388 dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
1389 ret);
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301390 ret = 0;
Pratibhasagar V9acf2642013-11-21 21:07:21 +05301391 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301392 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1393 if (!pin_data) {
1394 dev_err(dev, "No memory for pin_data\n");
1395 ret = -ENOMEM;
1396 goto out;
1397 }
1398
1399 cnt = of_gpio_count(np);
1400 if (cnt > 0) {
1401 pin_data->gpio_data = devm_kzalloc(dev,
1402 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1403 if (!pin_data->gpio_data) {
1404 dev_err(dev, "No memory for gpio_data\n");
1405 ret = -ENOMEM;
1406 goto out;
1407 }
1408 pin_data->gpio_data->size = cnt;
1409 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1410 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1411
1412 if (!pin_data->gpio_data->gpio) {
1413 dev_err(dev, "No memory for gpio\n");
1414 ret = -ENOMEM;
1415 goto out;
1416 }
1417
1418 for (i = 0; i < cnt; i++) {
1419 const char *name = NULL;
1420 char result[GPIO_NAME_MAX_LEN];
1421 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1422 of_property_read_string_index(np,
1423 "qcom,gpio-names", i, &name);
1424
1425 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1426 dev_name(dev), name ? name : "?");
1427 pin_data->gpio_data->gpio[i].name = result;
1428 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
1429 pin_data->gpio_data->gpio[i].name,
1430 pin_data->gpio_data->gpio[i].no);
Asutosh Das0ef24812012-12-18 16:14:02 +05301431 }
1432 }
Sahitya Tummala1cd7e072014-02-14 13:19:01 +05301433 pdata->pin_data = pin_data;
Asutosh Das0ef24812012-12-18 16:14:02 +05301434out:
1435 if (ret)
1436 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1437 return ret;
1438}
1439
#ifdef CONFIG_SMP
/*
 * "affine_irq" PM QoS type: the QoS request follows whichever CPU the
 * controller IRQ is affined to. Only meaningful on SMP builds.
 */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
{
	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
}
#else
/* UP build: IRQ affinity is meaningless, keep the default request type. */
static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
#endif
1448
Gilad Bronerc788a672015-09-08 15:39:11 +03001449static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
1450 struct sdhci_msm_pltfm_data *pdata)
1451{
1452 struct device_node *np = dev->of_node;
1453 const char *str;
1454 u32 cpu;
1455 int ret = 0;
1456 int i;
1457
1458 pdata->pm_qos_data.irq_valid = false;
1459 pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
1460 if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
1461 !strcmp(str, "affine_irq")) {
Krishna Kondaf85e31a2015-10-23 11:43:02 -07001462 parse_affine_irq(pdata);
Gilad Bronerc788a672015-09-08 15:39:11 +03001463 }
1464
1465 /* must specify cpu for "affine_cores" type */
1466 if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
1467 pdata->pm_qos_data.irq_cpu = -1;
1468 ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
1469 if (ret) {
1470 dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
1471 ret);
1472 goto out;
1473 }
1474 if (cpu < 0 || cpu >= num_possible_cpus()) {
1475 dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
1476 __func__, cpu, num_possible_cpus());
1477 ret = -EINVAL;
1478 goto out;
1479 }
1480 pdata->pm_qos_data.irq_cpu = cpu;
1481 }
1482
1483 if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
1484 SDHCI_POWER_POLICY_NUM) {
1485 dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
1486 __func__, SDHCI_POWER_POLICY_NUM);
1487 ret = -EINVAL;
1488 goto out;
1489 }
1490
1491 for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
1492 of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
1493 &pdata->pm_qos_data.irq_latency.latency[i]);
1494
1495 pdata->pm_qos_data.irq_valid = true;
1496out:
1497 return ret;
1498}
1499
1500static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
1501 struct sdhci_msm_pltfm_data *pdata)
1502{
1503 struct device_node *np = dev->of_node;
1504 u32 mask;
1505 int nr_groups;
1506 int ret;
1507 int i;
1508
1509 /* Read cpu group mapping */
1510 nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
1511 if (nr_groups <= 0) {
1512 ret = -EINVAL;
1513 goto out;
1514 }
1515 pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
1516 pdata->pm_qos_data.cpu_group_map.mask =
1517 kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
1518 if (!pdata->pm_qos_data.cpu_group_map.mask) {
1519 ret = -ENOMEM;
1520 goto out;
1521 }
1522
1523 for (i = 0; i < nr_groups; i++) {
1524 of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
1525 i, &mask);
1526
1527 pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
1528 if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
1529 cpu_possible_mask)) {
1530 dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
1531 __func__, mask, i);
1532 ret = -EINVAL;
1533 goto free_res;
1534 }
1535 }
1536 return 0;
1537
1538free_res:
1539 kfree(pdata->pm_qos_data.cpu_group_map.mask);
1540out:
1541 return ret;
1542}
1543
1544static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
1545 int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
1546{
1547 struct device_node *np = dev->of_node;
1548 struct sdhci_msm_pm_qos_latency *values;
1549 int ret;
1550 int i;
1551 int group;
1552 int cfg;
1553
1554 ret = of_property_count_u32_elems(np, name);
1555 if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
1556 dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
1557 __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
1558 ret);
1559 return -EINVAL;
1560 } else if (ret < 0) {
1561 return ret;
1562 }
1563
1564 values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
1565 GFP_KERNEL);
1566 if (!values)
1567 return -ENOMEM;
1568
1569 for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
1570 group = i / SDHCI_POWER_POLICY_NUM;
1571 cfg = i % SDHCI_POWER_POLICY_NUM;
1572 of_property_read_u32_index(np, name, i,
1573 &(values[group].latency[cfg]));
1574 }
1575
1576 *latency = values;
1577 return 0;
1578}
1579
1580static void sdhci_msm_pm_qos_parse(struct device *dev,
1581 struct sdhci_msm_pltfm_data *pdata)
1582{
1583 if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
1584 dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
1585 __func__);
1586
1587 if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
1588 pdata->pm_qos_data.cmdq_valid =
1589 !sdhci_msm_pm_qos_parse_latency(dev,
1590 "qcom,pm-qos-cmdq-latency-us",
1591 pdata->pm_qos_data.cpu_group_map.nr_groups,
1592 &pdata->pm_qos_data.cmdq_latency);
1593 pdata->pm_qos_data.legacy_valid =
1594 !sdhci_msm_pm_qos_parse_latency(dev,
1595 "qcom,pm-qos-legacy-latency-us",
1596 pdata->pm_qos_data.cpu_group_map.nr_groups,
1597 &pdata->pm_qos_data.latency);
1598 if (!pdata->pm_qos_data.cmdq_valid &&
1599 !pdata->pm_qos_data.legacy_valid) {
1600 /* clean-up previously allocated arrays */
1601 kfree(pdata->pm_qos_data.latency);
1602 kfree(pdata->pm_qos_data.cmdq_latency);
1603 dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
1604 __func__);
1605 }
1606 } else {
1607 dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
1608 __func__);
1609 }
1610}
1611
Asutosh Das0ef24812012-12-18 16:14:02 +05301612/* Parse platform data */
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001613static
1614struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
1615 struct sdhci_msm_host *msm_host)
Asutosh Das0ef24812012-12-18 16:14:02 +05301616{
1617 struct sdhci_msm_pltfm_data *pdata = NULL;
1618 struct device_node *np = dev->of_node;
1619 u32 bus_width = 0;
1620 int len, i;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301621 int clk_table_len;
1622 u32 *clk_table = NULL;
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301623 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05301624
1625 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1626 if (!pdata) {
1627 dev_err(dev, "failed to allocate memory for platform data\n");
1628 goto out;
1629 }
1630
Sujit Reddy Thumma1958d3d2013-06-03 09:54:32 +05301631 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
1632 if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
1633 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala581df132013-03-12 14:57:46 +05301634
Asutosh Das0ef24812012-12-18 16:14:02 +05301635 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1636 if (bus_width == 8)
1637 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1638 else if (bus_width == 4)
1639 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1640 else {
1641 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1642 pdata->mmc_bus_width = 0;
1643 }
1644
Talel Shenhar7dc5f792015-05-18 12:12:48 +03001645 if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
1646 &msm_host->mmc->clk_scaling.freq_table,
1647 &msm_host->mmc->clk_scaling.freq_table_sz, 0))
1648 pr_debug("%s: no clock scaling frequencies were supplied\n",
1649 dev_name(dev));
1650 else if (!msm_host->mmc->clk_scaling.freq_table ||
1651 !msm_host->mmc->clk_scaling.freq_table_sz)
1652 dev_err(dev, "bad dts clock scaling frequencies\n");
1653
Sahitya Tummala22dd3362013-02-28 19:50:51 +05301654 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1655 &clk_table, &clk_table_len, 0)) {
1656 dev_err(dev, "failed parsing supported clock rates\n");
1657 goto out;
1658 }
1659 if (!clk_table || !clk_table_len) {
1660 dev_err(dev, "Invalid clock table\n");
1661 goto out;
1662 }
1663 pdata->sup_clk_table = clk_table;
1664 pdata->sup_clk_cnt = clk_table_len;
1665
Asutosh Das0ef24812012-12-18 16:14:02 +05301666 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1667 sdhci_msm_slot_reg_data),
1668 GFP_KERNEL);
1669 if (!pdata->vreg_data) {
1670 dev_err(dev, "failed to allocate memory for vreg data\n");
1671 goto out;
1672 }
1673
1674 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1675 "vdd")) {
1676 dev_err(dev, "failed parsing vdd data\n");
1677 goto out;
1678 }
1679 if (sdhci_msm_dt_parse_vreg_info(dev,
1680 &pdata->vreg_data->vdd_io_data,
1681 "vdd-io")) {
1682 dev_err(dev, "failed parsing vdd-io data\n");
1683 goto out;
1684 }
1685
1686 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1687 dev_err(dev, "failed parsing gpio data\n");
1688 goto out;
1689 }
1690
Asutosh Das0ef24812012-12-18 16:14:02 +05301691 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1692
1693 for (i = 0; i < len; i++) {
1694 const char *name = NULL;
1695
1696 of_property_read_string_index(np,
1697 "qcom,bus-speed-mode", i, &name);
1698 if (!name)
1699 continue;
1700
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07001701 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1702 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1703 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1704 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1705 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das0ef24812012-12-18 16:14:02 +05301706 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1707 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1708 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1709 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1710 pdata->caps |= MMC_CAP_1_8V_DDR
1711 | MMC_CAP_UHS_DDR50;
1712 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1713 pdata->caps |= MMC_CAP_1_2V_DDR
1714 | MMC_CAP_UHS_DDR50;
1715 }
1716
1717 if (of_get_property(np, "qcom,nonremovable", NULL))
1718 pdata->nonremovable = true;
1719
Guoping Yuf7c91332014-08-20 16:56:18 +08001720 if (of_get_property(np, "qcom,nonhotplug", NULL))
1721 pdata->nonhotplug = true;
1722
Venkat Gopalakrishnan9a62e042015-03-03 16:14:55 -08001723 pdata->largeaddressbus =
1724 of_property_read_bool(np, "qcom,large-address-bus");
1725
Dov Levenglickc9033ab2015-03-10 16:00:56 +02001726 if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
1727 msm_host->mmc->wakeup_on_idle = true;
1728
Gilad Bronerc788a672015-09-08 15:39:11 +03001729 sdhci_msm_pm_qos_parse(dev, pdata);
1730
Pavan Anamula5a256df2015-10-16 14:38:28 +05301731 if (of_get_property(np, "qcom,core_3_0v_support", NULL))
1732 pdata->core_3_0v_support = true;
1733
Asutosh Das0ef24812012-12-18 16:14:02 +05301734 return pdata;
1735out:
1736 return NULL;
1737}
1738
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301739/* Returns required bandwidth in Bytes per Sec */
1740static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1741 struct mmc_ios *ios)
1742{
Sahitya Tummala2886c922013-04-03 18:03:31 +05301743 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1744 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1745
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301746 unsigned int bw;
1747
Sahitya Tummala2886c922013-04-03 18:03:31 +05301748 bw = msm_host->clk_rate;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301749 /*
1750 * For DDR mode, SDCC controller clock will be at
1751 * the double rate than the actual clock that goes to card.
1752 */
1753 if (ios->bus_width == MMC_BUS_WIDTH_4)
1754 bw /= 2;
1755 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1756 bw /= 8;
1757
1758 return bw;
1759}
1760
1761static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1762 unsigned int bw)
1763{
1764 unsigned int *table = host->pdata->voting_data->bw_vecs;
1765 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1766 int i;
1767
1768 if (host->msm_bus_vote.is_max_bw_needed && bw)
1769 return host->msm_bus_vote.max_bw_vote;
1770
1771 for (i = 0; i < size; i++) {
1772 if (bw <= table[i])
1773 break;
1774 }
1775
1776 if (i && (i == size))
1777 i--;
1778
1779 return i;
1780}
1781
1782/*
1783 * This function must be called with host lock acquired.
1784 * Caller of this function should also ensure that msm bus client
1785 * handle is not null.
1786 */
1787static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1788 int vote,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301789 unsigned long *flags)
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301790{
1791 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
1792 int rc = 0;
1793
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301794 BUG_ON(!flags);
1795
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301796 if (vote != msm_host->msm_bus_vote.curr_vote) {
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301797 spin_unlock_irqrestore(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301798 rc = msm_bus_scale_client_update_request(
1799 msm_host->msm_bus_vote.client_handle, vote);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301800 spin_lock_irqsave(&host->lock, *flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301801 if (rc) {
1802 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1803 mmc_hostname(host->mmc),
1804 msm_host->msm_bus_vote.client_handle, vote, rc);
1805 goto out;
1806 }
1807 msm_host->msm_bus_vote.curr_vote = vote;
1808 }
1809out:
1810 return rc;
1811}
1812
1813/*
1814 * Internal work. Work to set 0 bandwidth for msm bus.
1815 */
1816static void sdhci_msm_bus_work(struct work_struct *work)
1817{
1818 struct sdhci_msm_host *msm_host;
1819 struct sdhci_host *host;
1820 unsigned long flags;
1821
1822 msm_host = container_of(work, struct sdhci_msm_host,
1823 msm_bus_vote.vote_work.work);
1824 host = platform_get_drvdata(msm_host->pdev);
1825
1826 if (!msm_host->msm_bus_vote.client_handle)
1827 return;
1828
1829 spin_lock_irqsave(&host->lock, flags);
1830 /* don't vote for 0 bandwidth if any request is in progress */
1831 if (!host->mrq) {
1832 sdhci_msm_bus_set_vote(msm_host,
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301833 msm_host->msm_bus_vote.min_bw_vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301834 } else
1835 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1836 mmc_hostname(host->mmc), __func__);
1837 spin_unlock_irqrestore(&host->lock, flags);
1838}
1839
1840/*
1841 * This function cancels any scheduled delayed work and sets the bus
1842 * vote based on bw (bandwidth) argument.
1843 */
1844static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
1845 unsigned int bw)
1846{
1847 int vote;
1848 unsigned long flags;
1849 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1850 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1851
1852 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
1853 spin_lock_irqsave(&host->lock, flags);
1854 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
Sujit Reddy Thumma024c0582013-08-06 11:21:33 +05301855 sdhci_msm_bus_set_vote(msm_host, vote, &flags);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05301856 spin_unlock_irqrestore(&host->lock, flags);
1857}
1858
1859#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1860
/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	/* Only schedule if the minimum-bandwidth vote is not already cast */
	if (msm_host->msm_bus_vote.min_bw_vote !=
		msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_wq,
				   &msm_host->msm_bus_vote.vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}
1876
/*
 * Register a bus-scaling client for this host from DT data.
 * Reads the msm-bus platform data and the "qcom,bus-bw-vectors-bps"
 * table, registers a bus-scale client, and caches the min/max vote
 * indices. Bus voting is silently left disabled (rc = 0) when the DT
 * provides no complete voting data.
 */
static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	/* Only register a client when every piece of voting data exists */
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		/* Voting data incomplete: release the devm allocation early */
		devm_kfree(dev, data);
	}

out:
	return rc;
}
1931
1932static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1933{
1934 if (host->msm_bus_vote.client_handle)
1935 msm_bus_scale_unregister_client(
1936 host->msm_bus_vote.client_handle);
1937}
1938
/*
 * Cast (enable != 0) or schedule removal of (enable == 0) the bus vote
 * for the bandwidth implied by the current ios settings. No-op when no
 * bus-scale client was registered.
 */
static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
#ifdef CONFIG_MMC_CLKGATE
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
#endif
			sdhci_msm_bus_queue_work(host);
	}
}
1967
Asutosh Das0ef24812012-12-18 16:14:02 +05301968/* Regulator utility functions */
1969static int sdhci_msm_vreg_init_reg(struct device *dev,
1970 struct sdhci_msm_reg_data *vreg)
1971{
1972 int ret = 0;
1973
1974 /* check if regulator is already initialized? */
1975 if (vreg->reg)
1976 goto out;
1977
1978 /* Get the regulator handle */
1979 vreg->reg = devm_regulator_get(dev, vreg->name);
1980 if (IS_ERR(vreg->reg)) {
1981 ret = PTR_ERR(vreg->reg);
1982 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1983 __func__, vreg->name, ret);
1984 goto out;
1985 }
1986
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05301987 if (regulator_count_voltages(vreg->reg) > 0) {
1988 vreg->set_voltage_sup = true;
1989 /* sanity check */
1990 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1991 pr_err("%s: %s invalid constraints specified\n",
1992 __func__, vreg->name);
1993 ret = -EINVAL;
1994 }
Asutosh Das0ef24812012-12-18 16:14:02 +05301995 }
1996
1997out:
1998 return ret;
1999}
2000
2001static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
2002{
2003 if (vreg->reg)
2004 devm_regulator_put(vreg->reg);
2005}
2006
2007static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
2008 *vreg, int uA_load)
2009{
2010 int ret = 0;
2011
2012 /*
2013 * regulators that do not support regulator_set_voltage also
2014 * do not support regulator_set_optimum_mode
2015 */
2016 if (vreg->set_voltage_sup) {
2017 ret = regulator_set_load(vreg->reg, uA_load);
2018 if (ret < 0)
2019 pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
2020 __func__, vreg->name, uA_load, ret);
2021 else
2022 /*
2023 * regulator_set_load() can return non zero
2024 * value even for success case.
2025 */
2026 ret = 0;
2027 }
2028 return ret;
2029}
2030
2031static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
2032 int min_uV, int max_uV)
2033{
2034 int ret = 0;
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302035 if (vreg->set_voltage_sup) {
2036 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
2037 if (ret) {
2038 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das0ef24812012-12-18 16:14:02 +05302039 __func__, vreg->name, min_uV, max_uV, ret);
2040 }
Asutosh Dasc58cc7a2013-06-28 15:03:44 +05302041 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302042
2043 return ret;
2044}
2045
2046static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
2047{
2048 int ret = 0;
2049
2050 /* Put regulator in HPM (high power mode) */
2051 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
2052 if (ret < 0)
2053 return ret;
2054
2055 if (!vreg->is_enabled) {
2056 /* Set voltage level */
2057 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
2058 vreg->high_vol_level);
2059 if (ret)
2060 return ret;
2061 }
2062 ret = regulator_enable(vreg->reg);
2063 if (ret) {
2064 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
2065 __func__, vreg->name, ret);
2066 return ret;
2067 }
2068 vreg->is_enabled = true;
2069 return ret;
2070}
2071
2072static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
2073{
2074 int ret = 0;
2075
2076 /* Never disable regulator marked as always_on */
2077 if (vreg->is_enabled && !vreg->is_always_on) {
2078 ret = regulator_disable(vreg->reg);
2079 if (ret) {
2080 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
2081 __func__, vreg->name, ret);
2082 goto out;
2083 }
2084 vreg->is_enabled = false;
2085
2086 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
2087 if (ret < 0)
2088 goto out;
2089
2090 /* Set min. voltage level to 0 */
2091 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
2092 if (ret)
2093 goto out;
2094 } else if (vreg->is_enabled && vreg->is_always_on) {
2095 if (vreg->lpm_sup) {
2096 /* Put always_on regulator in LPM (low power mode) */
2097 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
2098 vreg->lpm_uA);
2099 if (ret < 0)
2100 goto out;
2101 }
2102 }
2103out:
2104 return ret;
2105}
2106
2107static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
2108 bool enable, bool is_init)
2109{
2110 int ret = 0, i;
2111 struct sdhci_msm_slot_reg_data *curr_slot;
2112 struct sdhci_msm_reg_data *vreg_table[2];
2113
2114 curr_slot = pdata->vreg_data;
2115 if (!curr_slot) {
2116 pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
2117 __func__);
2118 goto out;
2119 }
2120
2121 vreg_table[0] = curr_slot->vdd_data;
2122 vreg_table[1] = curr_slot->vdd_io_data;
2123
2124 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
2125 if (vreg_table[i]) {
2126 if (enable)
2127 ret = sdhci_msm_vreg_enable(vreg_table[i]);
2128 else
2129 ret = sdhci_msm_vreg_disable(vreg_table[i]);
2130 if (ret)
2131 goto out;
2132 }
2133 }
2134out:
2135 return ret;
2136}
2137
2138/*
2139 * Reset vreg by ensuring it is off during probe. A call
2140 * to enable vreg is needed to balance disable vreg
2141 */
2142static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
2143{
2144 int ret;
2145
2146 ret = sdhci_msm_setup_vreg(pdata, 1, true);
2147 if (ret)
2148 return ret;
2149 ret = sdhci_msm_setup_vreg(pdata, 0, true);
2150 return ret;
2151}
2152
/* This init function should be called only once for each SDHC slot */
/*
 * With is_init true: acquire both regulator handles and run a
 * reset (enable+disable) cycle; unwinds vdd if vdd-io init fails.
 * With is_init false: release both regulator handles.
 */
static int sdhci_msm_vreg_init(struct device *dev,
				struct sdhci_msm_pltfm_data *pdata,
				bool is_init)
{
	int ret = 0;
	struct sdhci_msm_slot_reg_data *curr_slot;
	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;

	curr_slot = pdata->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (!is_init)
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;

	/*
	 * Get the regulator handle from voltage regulator framework
	 * and then try to set the voltage level for the regulator
	 */
	if (curr_vdd_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
		if (ret)
			goto out;
	}
	if (curr_vdd_io_reg) {
		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
		if (ret)
			goto vdd_reg_deinit;
	}
	ret = sdhci_msm_vreg_reset(pdata);
	if (ret)
		dev_err(dev, "vreg reset failed (%d)\n", ret);
	goto out;

vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
out:
	return ret;
}
2201
2202
2203static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
2204 enum vdd_io_level level,
2205 unsigned int voltage_level)
2206{
2207 int ret = 0;
2208 int set_level;
2209 struct sdhci_msm_reg_data *vdd_io_reg;
2210
2211 if (!pdata->vreg_data)
2212 return ret;
2213
2214 vdd_io_reg = pdata->vreg_data->vdd_io_data;
2215 if (vdd_io_reg && vdd_io_reg->is_enabled) {
2216 switch (level) {
2217 case VDD_IO_LOW:
2218 set_level = vdd_io_reg->low_vol_level;
2219 break;
2220 case VDD_IO_HIGH:
2221 set_level = vdd_io_reg->high_vol_level;
2222 break;
2223 case VDD_IO_SET_LEVEL:
2224 set_level = voltage_level;
2225 break;
2226 default:
2227 pr_err("%s: invalid argument level = %d",
2228 __func__, level);
2229 ret = -EINVAL;
2230 return ret;
2231 }
2232 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
2233 set_level);
2234 }
2235 return ret;
2236}
2237
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302238void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
2239{
2240 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2241 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2242
2243 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
2244 mmc_hostname(host->mmc),
2245 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
2246 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
2247 readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
2248}
2249
Asutosh Das0ef24812012-12-18 16:14:02 +05302250static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
2251{
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002252 struct sdhci_host *host = (struct sdhci_host *)data;
2253 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2254 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das0ef24812012-12-18 16:14:02 +05302255 u8 irq_status = 0;
2256 u8 irq_ack = 0;
2257 int ret = 0;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302258 int pwr_state = 0, io_level = 0;
2259 unsigned long flags;
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302260 int retry = 10;
Asutosh Das0ef24812012-12-18 16:14:02 +05302261
2262 irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
2263 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
2264 mmc_hostname(msm_host->mmc), irq, irq_status);
2265
2266 /* Clear the interrupt */
2267 writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2268 /*
2269 * SDHC has core_mem and hc_mem device memory and these memory
2270 * addresses do not fall within 1KB region. Hence, any update to
2271 * core_mem address space would require an mb() to ensure this gets
2272 * completed before its next update to registers within hc_mem.
2273 */
2274 mb();
Pavan Anamula53ffa0b2015-07-22 21:46:32 +05302275 /*
2276 * There is a rare HW scenario where the first clear pulse could be
2277 * lost when actual reset and clear/read of status register is
2278 * happening at a time. Hence, retry for at least 10 times to make
2279 * sure status register is cleared. Otherwise, this will result in
2280 * a spurious power IRQ resulting in system instability.
2281 */
2282 while (irq_status &
2283 readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
2284 if (retry == 0) {
2285 pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
2286 mmc_hostname(host->mmc), irq_status);
2287 sdhci_msm_dump_pwr_ctrl_regs(host);
2288 BUG_ON(1);
2289 }
2290 writeb_relaxed(irq_status,
2291 (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2292 retry--;
2293 udelay(10);
2294 }
2295 if (likely(retry < 10))
2296 pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
2297 mmc_hostname(host->mmc), irq_status, retry);
Asutosh Das0ef24812012-12-18 16:14:02 +05302298
2299 /* Handle BUS ON/OFF*/
2300 if (irq_status & CORE_PWRCTL_BUS_ON) {
2301 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302302 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302303 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302304 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2305 VDD_IO_HIGH, 0);
2306 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302307 if (ret)
2308 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2309 else
2310 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302311
2312 pwr_state = REQ_BUS_ON;
2313 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302314 }
2315 if (irq_status & CORE_PWRCTL_BUS_OFF) {
2316 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302317 if (!ret) {
Asutosh Das0ef24812012-12-18 16:14:02 +05302318 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302319 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
2320 VDD_IO_LOW, 0);
2321 }
Asutosh Das0ef24812012-12-18 16:14:02 +05302322 if (ret)
2323 irq_ack |= CORE_PWRCTL_BUS_FAIL;
2324 else
2325 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302326
2327 pwr_state = REQ_BUS_OFF;
2328 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302329 }
2330 /* Handle IO LOW/HIGH */
2331 if (irq_status & CORE_PWRCTL_IO_LOW) {
2332 /* Switch voltage Low */
2333 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
2334 if (ret)
2335 irq_ack |= CORE_PWRCTL_IO_FAIL;
2336 else
2337 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302338
2339 io_level = REQ_IO_LOW;
Asutosh Das0ef24812012-12-18 16:14:02 +05302340 }
2341 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2342 /* Switch voltage High */
2343 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2344 if (ret)
2345 irq_ack |= CORE_PWRCTL_IO_FAIL;
2346 else
2347 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302348
2349 io_level = REQ_IO_HIGH;
Asutosh Das0ef24812012-12-18 16:14:02 +05302350 }
2351
2352 /* ACK status to the core */
2353 writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
2354 /*
2355 * SDHC has core_mem and hc_mem device memory and these memory
2356 * addresses do not fall within 1KB region. Hence, any update to
2357 * core_mem address space would require an mb() to ensure this gets
2358 * completed before its next update to registers within hc_mem.
2359 */
2360 mb();
2361
Krishna Konda46fd1432014-10-30 21:13:27 -07002362 if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002363 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2364 ~CORE_IO_PAD_PWR_SWITCH),
2365 host->ioaddr + CORE_VENDOR_SPEC);
Krishna Konda46fd1432014-10-30 21:13:27 -07002366 else if ((io_level & REQ_IO_LOW) ||
2367 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002368 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
2369 CORE_IO_PAD_PWR_SWITCH),
2370 host->ioaddr + CORE_VENDOR_SPEC);
2371 mb();
2372
Asutosh Das0ef24812012-12-18 16:14:02 +05302373 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2374 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302375 spin_lock_irqsave(&host->lock, flags);
2376 if (pwr_state)
2377 msm_host->curr_pwr_state = pwr_state;
2378 if (io_level)
2379 msm_host->curr_io_level = io_level;
2380 complete(&msm_host->pwr_irq_completion);
2381 spin_unlock_irqrestore(&host->lock, flags);
2382
Asutosh Das0ef24812012-12-18 16:14:02 +05302383 return IRQ_HANDLED;
2384}
2385
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302386static ssize_t
Sahitya Tummala5c55b932013-06-20 14:00:18 +05302387show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2388{
2389 struct sdhci_host *host = dev_get_drvdata(dev);
2390 int poll;
2391 unsigned long flags;
2392
2393 spin_lock_irqsave(&host->lock, flags);
2394 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2395 spin_unlock_irqrestore(&host->lock, flags);
2396
2397 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2398}
2399
2400static ssize_t
2401store_polling(struct device *dev, struct device_attribute *attr,
2402 const char *buf, size_t count)
2403{
2404 struct sdhci_host *host = dev_get_drvdata(dev);
2405 int value;
2406 unsigned long flags;
2407
2408 if (!kstrtou32(buf, 0, &value)) {
2409 spin_lock_irqsave(&host->lock, flags);
2410 if (value) {
2411 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2412 mmc_detect_change(host->mmc, 0);
2413 } else {
2414 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2415 }
2416 spin_unlock_irqrestore(&host->lock, flags);
2417 }
2418 return count;
2419}
2420
2421static ssize_t
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05302422show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2423 char *buf)
2424{
2425 struct sdhci_host *host = dev_get_drvdata(dev);
2426 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2427 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2428
2429 return snprintf(buf, PAGE_SIZE, "%u\n",
2430 msm_host->msm_bus_vote.is_max_bw_needed);
2431}
2432
2433static ssize_t
2434store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2435 const char *buf, size_t count)
2436{
2437 struct sdhci_host *host = dev_get_drvdata(dev);
2438 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2439 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2440 uint32_t value;
2441 unsigned long flags;
2442
2443 if (!kstrtou32(buf, 0, &value)) {
2444 spin_lock_irqsave(&host->lock, flags);
2445 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2446 spin_unlock_irqrestore(&host->lock, flags);
2447 }
2448 return count;
2449}
2450
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302451static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das0ef24812012-12-18 16:14:02 +05302452{
2453 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2454 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302455 unsigned long flags;
2456 bool done = false;
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302457 u32 io_sig_sts;
Asutosh Das0ef24812012-12-18 16:14:02 +05302458
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302459 spin_lock_irqsave(&host->lock, flags);
2460 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2461 mmc_hostname(host->mmc), __func__, req_type,
2462 msm_host->curr_pwr_state, msm_host->curr_io_level);
Sahitya Tummala481fbb02013-08-06 15:22:28 +05302463 io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
2464 /*
2465 * The IRQ for request type IO High/Low will be generated when -
2466 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
2467 * 2. If 1 is true and when there is a state change in 1.8V enable
2468 * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
2469 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
2470 * layer tries to set it to 3.3V before card detection happens, the
2471 * IRQ doesn't get triggered as there is no state change in this bit.
2472 * The driver already handles this case by changing the IO voltage
2473 * level to high as part of controller power up sequence. Hence, check
2474 * for host->pwr to handle a case where IO voltage high request is
2475 * issued even before controller power up.
2476 */
2477 if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
2478 if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
2479 ((req_type & REQ_IO_HIGH) && !host->pwr)) {
2480 pr_debug("%s: do not wait for power IRQ that never comes\n",
2481 mmc_hostname(host->mmc));
2482 spin_unlock_irqrestore(&host->lock, flags);
2483 return;
2484 }
2485 }
2486
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302487 if ((req_type & msm_host->curr_pwr_state) ||
2488 (req_type & msm_host->curr_io_level))
2489 done = true;
2490 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das0ef24812012-12-18 16:14:02 +05302491
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05302492 /*
2493 * This is needed here to hanlde a case where IRQ gets
2494 * triggered even before this function is called so that
2495 * x->done counter of completion gets reset. Otherwise,
2496 * next call to wait_for_completion returns immediately
2497 * without actually waiting for the IRQ to be handled.
2498 */
2499 if (done)
2500 init_completion(&msm_host->pwr_irq_completion);
2501 else
2502 wait_for_completion(&msm_host->pwr_irq_completion);
2503
2504 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2505 __func__, req_type);
Asutosh Das0ef24812012-12-18 16:14:02 +05302506}
2507
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002508static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2509{
Ritesh Harjani8e36f662014-11-14 11:09:56 +05302510 u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
2511
2512 if (enable) {
2513 config |= CORE_CDR_EN;
2514 config &= ~CORE_CDR_EXT_EN;
2515 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2516 } else {
2517 config &= ~CORE_CDR_EN;
2518 config |= CORE_CDR_EXT_EN;
2519 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
2520 }
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07002521}
2522
Asutosh Das648f9d12013-01-10 21:11:04 +05302523static unsigned int sdhci_msm_max_segs(void)
2524{
2525 return SDHCI_MSM_MAX_SEGMENTS;
2526}
2527
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302528static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302529{
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302530 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2531 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05302532
Sahitya Tummala22dd3362013-02-28 19:50:51 +05302533 return msm_host->pdata->sup_clk_table[0];
2534}
2535
2536static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2537{
2538 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2539 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2540 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2541
2542 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2543}
2544
2545static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2546 u32 req_clk)
2547{
2548 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2549 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2550 unsigned int sel_clk = -1;
2551 unsigned char cnt;
2552
2553 if (req_clk < sdhci_msm_get_min_clock(host)) {
2554 sel_clk = sdhci_msm_get_min_clock(host);
2555 return sel_clk;
2556 }
2557
2558 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2559 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2560 break;
2561 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2562 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2563 break;
2564 } else {
2565 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2566 }
2567 }
2568 return sel_clk;
2569}
2570
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05302571static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2572{
2573 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2574 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2575 int rc = 0;
2576
2577 if (atomic_read(&msm_host->controller_clock))
2578 return 0;
2579
2580 sdhci_msm_bus_voting(host, 1);
2581
2582 if (!IS_ERR(msm_host->pclk)) {
2583 rc = clk_prepare_enable(msm_host->pclk);
2584 if (rc) {
2585 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2586 mmc_hostname(host->mmc), __func__, rc);
2587 goto remove_vote;
2588 }
2589 }
2590
2591 rc = clk_prepare_enable(msm_host->clk);
2592 if (rc) {
2593 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2594 mmc_hostname(host->mmc), __func__, rc);
2595 goto disable_pclk;
2596 }
2597
2598 atomic_set(&msm_host->controller_clock, 1);
2599 pr_debug("%s: %s: enabled controller clock\n",
2600 mmc_hostname(host->mmc), __func__);
2601 goto out;
2602
2603disable_pclk:
2604 if (!IS_ERR(msm_host->pclk))
2605 clk_disable_unprepare(msm_host->pclk);
2606remove_vote:
2607 if (msm_host->msm_bus_vote.client_handle)
2608 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2609out:
2610 return rc;
2611}
2612
2613
2614
/*
 * sdhci_msm_prepare_clocks - enable or disable the full set of SDHC clocks
 * (controller clock, optional bus/ff/sleep clocks) and track the state in
 * @clks_on.
 *
 * Enable path: bus vote -> controller clock -> bus_clk -> ff_clk ->
 * sleep_clk, with a goto ladder that unwinds in reverse order on failure.
 * Disable path: gates SDCLK in SDHCI_CLOCK_CONTROL first, then disables
 * the clocks in reverse of the enable order and drops the bus vote.
 *
 * NOTE: when @card_clock_off is set we return early WITHOUT disabling the
 * clocks - during 1.8V signal switching the clock source must stay on so
 * SDHC registers remain accessible (see comment below). In that case
 * @clks_on is also left untouched.
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable().
 */
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int rc = 0;

	if (enable && !atomic_read(&msm_host->clks_on)) {
		pr_debug("%s: request to enable clocks\n",
				mmc_hostname(host->mmc));

		/*
		 * The bus-width or the clock rate might have changed
		 * after controller clocks are enabled, update bus vote
		 * in such case.
		 */
		if (atomic_read(&msm_host->controller_clock))
			sdhci_msm_bus_voting(host, 1);

		rc = sdhci_msm_enable_controller_clock(host);
		if (rc)
			goto remove_vote;

		/* bus_clk, ff_clk and sleep_clk are all optional */
		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
			rc = clk_prepare_enable(msm_host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_controller_clk;
			}
		}
		if (!IS_ERR(msm_host->ff_clk)) {
			rc = clk_prepare_enable(msm_host->ff_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus_clk;
			}
		}
		if (!IS_ERR(msm_host->sleep_clk)) {
			rc = clk_prepare_enable(msm_host->sleep_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_ff_clk;
			}
		}
		/* Ensure all clock enables are visible before proceeding */
		mb();

	} else if (!enable && atomic_read(&msm_host->clks_on)) {
		/* Gate the card clock before touching the source clocks */
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		mb();
		/*
		 * During 1.8V signal switching the clock source must
		 * still be ON as it requires accessing SDHC
		 * registers (SDHCi host control2 register bit 3 must
		 * be written and polled after stopping the SDCLK).
		 */
		if (host->mmc->card_clock_off)
			return 0;
		pr_debug("%s: request to disable clocks\n",
				mmc_hostname(host->mmc));
		/* Disable in reverse of the enable order */
		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
			clk_disable_unprepare(msm_host->sleep_clk);
		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
			clk_disable_unprepare(msm_host->ff_clk);
		clk_disable_unprepare(msm_host->clk);
		if (!IS_ERR(msm_host->pclk))
			clk_disable_unprepare(msm_host->pclk);
		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
			clk_disable_unprepare(msm_host->bus_clk);

		atomic_set(&msm_host->controller_clock, 0);
		sdhci_msm_bus_voting(host, 0);
	}
	atomic_set(&msm_host->clks_on, enable);
	goto out;
disable_ff_clk:
	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
		clk_disable_unprepare(msm_host->ff_clk);
disable_bus_clk:
	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
disable_controller_clk:
	if (!IS_ERR_OR_NULL(msm_host->clk))
		clk_disable_unprepare(msm_host->clk);
	if (!IS_ERR_OR_NULL(msm_host->pclk))
		clk_disable_unprepare(msm_host->pclk);
	atomic_set(&msm_host->controller_clock, 0);
remove_vote:
	if (msm_host->msm_bus_vote.client_handle)
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
out:
	return rc;
}
2709
/*
 * sdhci_msm_set_clock - set the card clock frequency.
 *
 * clock == 0: disable power-save clock gating, turn all clocks off and
 * record the rate. Otherwise: enable clocks, adjust CORE_CLK_PWRSAVE
 * according to whether the card allows clock gating, pick the closest
 * supported source rate (doubled for DDR50/DDR52/HS400 since the
 * controller internally divides by two), program the vendor-specific
 * MCLK/HC_SELECT_IN muxing for HS400 vs. all other modes, and finally
 * apply the rate with clk_set_rate() plus a bus-bandwidth re-vote.
 * Always ends by calling the core sdhci_set_clock().
 */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int rc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios curr_ios = host->mmc->ios;
	u32 sup_clock, ddr_clock, dll_lock;
	bool curr_pwrsave;

	if (!clock) {
		/*
		 * disable pwrsave to ensure clock is not auto-gated until
		 * the rate is >400KHz (initialization complete).
		 */
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
		sdhci_msm_prepare_clocks(host, false);
		host->clock = clock;
		goto out;
	}

	rc = sdhci_msm_prepare_clocks(host, true);
	if (rc)
		goto out;

	/* Current hardware state of the auto clock-gating bit */
	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			  CORE_CLK_PWRSAVE);
	/* Re-enable power save once past initialization (>400kHz) */
	if ((clock > 400000) &&
	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				| CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Disable pwrsave for a newly added card if doesn't allow clock
	 * gating.
	 */
	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE,
				host->ioaddr + CORE_VENDOR_SPEC);

	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
		/*
		 * The SDHC requires internal clock frequency to be double the
		 * actual clock that will be set for DDR mode. The controller
		 * uses the faster clock(100/400MHz) for some of its parts and
		 * send the actual required clock (50/200MHz) to the card.
		 */
		ddr_clock = clock * 2;
		sup_clock = sdhci_msm_get_sup_clk_rate(host,
				ddr_clock);
	}

	/*
	 * In general all timing modes are controlled via UHS mode select in
	 * Host Control2 register. eMMC specific HS200/HS400 doesn't have
	 * their respective modes defined here, hence we use these values.
	 *
	 * HS200 - SDR104 (Since they both are equivalent in functionality)
	 * HS400 - This involves multiple configurations
	 *		Initially SDR104 - when tuning is required as HS200
	 *		Then when switching to DDR @ 400MHz (HS400) we use
	 *		the vendor specific HC_SELECT_IN to control the mode.
	 *
	 * In addition to controlling the modes we also need to select the
	 * correct input clock for DLL depending on the mode.
	 *
	 * HS400 - divided clock (free running MCLK/2)
	 * All other modes - default (free running MCLK)
	 */
	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
		/* Select the divided clock (free running MCLK/2) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_HS400),
				host->ioaddr + CORE_VENDOR_SPEC);
		/*
		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
		 * register
		 */
		if ((msm_host->tuning_done ||
			(mmc_card_strobe(msm_host->mmc->card) &&
			 msm_host->enhanced_strobe)) &&
			!msm_host->calibration_done) {
			/*
			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
			 * field in VENDOR_SPEC_FUNC
			 */
			writel_relaxed((readl_relaxed(host->ioaddr + \
					CORE_VENDOR_SPEC)
					| CORE_HC_SELECT_IN_HS400
					| CORE_HC_SELECT_IN_EN),
					host->ioaddr + CORE_VENDOR_SPEC);
		}
		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
			/*
			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
			 * CORE_DLL_STATUS to be set. This should get set
			 * with in 15 us at 200 MHz.
			 */
			rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					dll_lock, (dll_lock & (CORE_DLL_LOCK |
					CORE_DDR_DLL_LOCK)), 10, 1000);
			if (rc == -ETIMEDOUT)
				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
						mmc_hostname(host->mmc),
						dll_lock);
		}
	} else {
		if (!msm_host->use_cdclp533)
			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
			writel_relaxed((readl_relaxed(host->ioaddr +
					CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
					host->ioaddr + CORE_VENDOR_SPEC3);

		/* Select the default clock (free running MCLK) */
		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_MCLK_SEL_MASK)
				| CORE_HC_MCLK_SEL_DFLT),
				host->ioaddr + CORE_VENDOR_SPEC);

		/*
		 * Disable HC_SELECT_IN to be able to use the UHS mode select
		 * configuration from Host Control2 register for all other
		 * modes.
		 *
		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
		 * in VENDOR_SPEC_FUNC
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_HC_SELECT_IN_EN
				& ~CORE_HC_SELECT_IN_MASK),
				host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* Make sure the mux programming lands before changing the rate */
	mb();

	if (sup_clock != msm_host->clk_rate) {
		pr_debug("%s: %s: setting clk rate to %u\n",
				mmc_hostname(host->mmc), __func__, sup_clock);
		rc = clk_set_rate(msm_host->clk, sup_clock);
		if (rc) {
			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
					mmc_hostname(host->mmc), __func__,
					sup_clock, rc);
			goto out;
		}
		msm_host->clk_rate = sup_clock;
		host->clock = clock;
		/*
		 * Update the bus vote in case of frequency change due to
		 * clock scaling.
		 */
		sdhci_msm_bus_voting(host, 1);
	}
out:
	sdhci_set_clock(host, clock);
}
2870
/*
 * sdhci_msm_set_uhs_signaling - program the UHS mode select field of
 * SDHCI_HOST_CONTROL2 for the requested timing @uhs.
 *
 * HS200/HS400 have no standard encoding, so they map onto SDR104 (they
 * are functionally equivalent at this level); DDR52 maps onto DDR50.
 * At clocks <= 100MHz the mode select is cleared and the DLL is reset
 * and powered down so tuning can be skipped (feedback clock mode), and
 * @calibration_done is cleared so CDCLP533 recalibrates when the clock
 * is raised again.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (uhs == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (uhs == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (uhs == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
		(uhs == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if ((uhs == MMC_TIMING_MMC_HS400) ||
		    (uhs == MMC_TIMING_MMC_HS200) ||
		    (uhs == MMC_TIMING_UHS_SDR104))
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;

		/*
		 * Make sure DLL is disabled when not required
		 *
		 * Write 1 to DLL_RST bit of DLL_CONFIG register
		 */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_RST),
				host->ioaddr + CORE_DLL_CONFIG);

		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
				| CORE_DLL_PDN),
				host->ioaddr + CORE_DLL_CONFIG);
		/* Order the DLL writes before the mode-select write below */
		mb();

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

}
2933
Venkat Gopalakrishnan34811972015-03-04 14:39:01 -08002934#define MAX_TEST_BUS 60
Konstantin Dorfman98543bf2015-10-01 17:56:54 +03002935#define DRV_NAME "cmdq-host"
2936static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_msm_host *msm_host)
2937{
2938 int i = 0;
2939 struct cmdq_host *cq_host = mmc_cmdq_private(msm_host->mmc);
2940 u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2941 u16 minor = version & CORE_VERSION_TARGET_MASK;
2942 /* registers offset changed starting from 4.2.0 */
2943 int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
2944
2945 pr_err("---- Debug RAM dump ----\n");
2946 pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
2947 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
2948 cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
2949
2950 while (i < 16) {
2951 pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
2952 cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
2953 i++;
2954 }
2955 pr_err("-------------------------\n");
2956}
Sahitya Tummala67717bc2013-08-02 09:21:37 +05302957
/*
 * sdhci_msm_dump_vendor_regs - dump MSM vendor-specific state for debug:
 * the CMDQ debug RAM (when CMDQ is present), MCI/DLL/vendor-spec
 * registers, and all test-bus outputs captured via CORE_TESTBUS_CONFIG.
 */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int tbsel, tbsel2;
	int i, index = 0;
	u32 test_bus_val = 0;
	u32 debug_reg[MAX_TEST_BUS] = {0};

	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
	if (host->cq_host)
		sdhci_msm_cmdq_dump_debug_ram(msm_host);

	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
	pr_info("Vndr func2: 0x%08x\n",
		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));

	/*
	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
	 * of CORE_TESTBUS_CONFIG register.
	 *
	 * To select test bus 0 to 7 use tbsel and to select any test bus
	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
	 */
	/* Select each test bus in turn, then latch its debug output */
	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
		for (tbsel = 0; tbsel < 8; tbsel++) {
			if (index >= MAX_TEST_BUS)
				break;
			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
					tbsel | CORE_TESTBUS_ENA;
			writel_relaxed(test_bus_val,
				msm_host->core_mem + CORE_TESTBUS_CONFIG);
			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
							CORE_SDCC_DEBUG_REG);
		}
	}
	/* 7*8 = 56 entries captured; the remaining slots stay zero */
	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, i + 3, debug_reg[i], debug_reg[i+1],
				debug_reg[i+2], debug_reg[i+3]);
}
3012
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303013/*
3014 * sdhci_msm_enhanced_strobe_mask :-
3015 * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
3016 * SW should write 3 to
3017 * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
3018 * The default reset value of this register is 2.
3019 */
3020static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
3021{
3022 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3023 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3024
Ritesh Harjani70e2a712015-08-25 11:34:16 +05303025 if (!msm_host->enhanced_strobe ||
3026 !mmc_card_strobe(msm_host->mmc->card)) {
3027 pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
Ritesh Harjani4a5ffd12015-07-15 13:32:07 +05303028 mmc_hostname(host->mmc));
3029 return;
3030 }
3031
3032 if (set) {
3033 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3034 | CORE_CMDEN_HS400_INPUT_MASK_CNT),
3035 host->ioaddr + CORE_VENDOR_SPEC3);
3036 } else {
3037 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
3038 & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
3039 host->ioaddr + CORE_VENDOR_SPEC3);
3040 }
3041}
3042
Venkat Gopalakrishnan06f7f792015-05-29 17:56:59 -07003043static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
3044{
3045 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3046 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3047
3048 if (set) {
3049 writel_relaxed(CORE_TESTBUS_ENA,
3050 msm_host->core_mem + CORE_TESTBUS_CONFIG);
3051 } else {
3052 u32 value;
3053
3054 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
3055 value &= ~CORE_TESTBUS_ENA;
3056 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
3057 }
Sahitya Tummala67717bc2013-08-02 09:21:37 +05303058}
3059
Dov Levenglick9c575e22015-07-20 09:30:52 +03003060static void sdhci_msm_detect(struct sdhci_host *host, bool detected)
3061{
3062 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3063 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3064 struct mmc_host *mmc = msm_host->mmc;
3065 struct mmc_card *card = mmc->card;
3066
3067 if (detected && mmc_card_sdio(card))
3068 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3069 else
3070 mmc->pm_caps &= ~MMC_PM_KEEP_POWER;
3071}
3072
/*
 * sdhci_msm_reset_workaround - request a controller soft reset via
 * HC_SW_RST_REQ and poll (up to 10000 * 10us = 100ms) for the hardware
 * to clear it. If the reset never completes, apply the wait-idle-disable
 * workaround (HC_SW_RST_WAIT_IDLE_DIS) so the controller resets without
 * waiting for pending AXI transfers, and record the time in
 * @reset_wa_t. With @enable false, only the workaround bit is cleared.
 */
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
	u32 vendor_func2;
	unsigned long timeout;

	vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);

	if (enable) {
		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
				CORE_VENDOR_SPEC_FUNC2);
		timeout = 10000;
		/* Poll for the hardware to ack the reset request */
		while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
				HC_SW_RST_REQ) {
			if (timeout == 0) {
				pr_info("%s: Applying wait idle disable workaround\n",
					mmc_hostname(host->mmc));
				/*
				 * Apply the reset workaround to not wait for
				 * pending data transfers on AXI before
				 * resetting the controller. This could be
				 * risky if the transfers were stuck on the
				 * AXI bus.
				 */
				vendor_func2 = readl_relaxed(host->ioaddr +
						CORE_VENDOR_SPEC_FUNC2);
				writel_relaxed(vendor_func2 |
						HC_SW_RST_WAIT_IDLE_DIS,
						host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
				host->reset_wa_t = ktime_get();
				return;
			}
			timeout--;
			udelay(10);
		}
		pr_info("%s: waiting for SW_RST_REQ is successful\n",
				mmc_hostname(host->mmc));
	} else {
		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
				host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
}
3114
Gilad Broner44445992015-09-29 16:05:39 +03003115static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
3116{
3117 struct sdhci_msm_pm_qos_irq *pm_qos_irq =
3118 container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);
3119
3120 if (atomic_read(&pm_qos_irq->counter))
3121 return;
3122
3123 pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
3124 pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
3125}
3126
/*
 * sdhci_msm_pm_qos_irq_vote - take an IRQ PM QoS vote for the latency of
 * the current power policy. Reference counted: if the request already
 * carries the right latency and this is not the first vote, nothing is
 * written. Otherwise any pending async unvote is cancelled before the
 * QoS request is updated.
 */
void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *latency =
		&msm_host->pdata->pm_qos_data.irq_latency;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
	/* Make sure to update the voting in case power policy has changed */
	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* A queued async unvote must not undo the vote we are taking */
	cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
				msm_host->pm_qos_irq.latency);
}
3149
/*
 * sdhci_msm_pm_qos_irq_unvote - drop one IRQ PM QoS vote. Underflow is
 * caught with a WARN and ignored. When the last vote is dropped, the QoS
 * request is relaxed to the default either asynchronously (via
 * unvote_work, when @async) or immediately.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	/* Guard against unbalanced unvote calls */
	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	} else {
		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
		return;
	}

	/* Votes still outstanding - keep the current request */
	if (counter)
		return;

	if (async) {
		schedule_work(&msm_host->pm_qos_irq.unvote_work);
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}
3178
Gilad Broner68c54562015-09-20 11:59:46 +03003179static ssize_t
3180sdhci_msm_pm_qos_irq_show(struct device *dev,
3181 struct device_attribute *attr, char *buf)
3182{
3183 struct sdhci_host *host = dev_get_drvdata(dev);
3184 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3185 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3186 struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
3187
3188 return snprintf(buf, PAGE_SIZE,
3189 "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
3190 irq->enabled, atomic_read(&irq->counter), irq->latency);
3191}
3192
3193static ssize_t
3194sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
3195 struct device_attribute *attr, char *buf)
3196{
3197 struct sdhci_host *host = dev_get_drvdata(dev);
3198 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3199 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3200
3201 return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
3202}
3203
3204static ssize_t
3205sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
3206 struct device_attribute *attr, const char *buf, size_t count)
3207{
3208 struct sdhci_host *host = dev_get_drvdata(dev);
3209 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3210 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3211 uint32_t value;
3212 bool enable;
3213 int ret;
3214
3215 ret = kstrtou32(buf, 0, &value);
3216 if (ret)
3217 goto out;
3218 enable = !!value;
3219
3220 if (enable == msm_host->pm_qos_irq.enabled)
3221 goto out;
3222
3223 msm_host->pm_qos_irq.enabled = enable;
3224 if (!enable) {
3225 cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
3226 atomic_set(&msm_host->pm_qos_irq.counter, 0);
3227 msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
3228 pm_qos_update_request(&msm_host->pm_qos_irq.req,
3229 msm_host->pm_qos_irq.latency);
3230 }
3231
3232out:
3233 return count;
3234}
3235
#ifdef CONFIG_SMP
/*
 * Bind the PM QoS request to the host's interrupt line so the QoS
 * constraint follows whichever CPU services the SDHC IRQ. The irq field
 * only exists in struct pm_qos_request on SMP builds, hence the stub
 * below for !CONFIG_SMP.
 */
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				  struct sdhci_host *host)
{
	msm_host->pm_qos_irq.req.irq = host->irq;
}
#else
static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
				  struct sdhci_host *host) { }
#endif
3246
/*
 * sdhci_msm_pm_qos_irq_init - one-time setup of IRQ PM QoS: configure the
 * request type (IRQ-affine or CPU-mask-affine), register the request with
 * the performance-mode latency for the initialization phase, and create
 * the "pm_qos_irq_enable" / "pm_qos_irq_status" sysfs attributes.
 *
 * Called once per partition; the @enabled flag makes repeat calls no-ops.
 * Does nothing if DT did not provide valid irq latency data.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;
	int ret;

	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	msm_host->pm_qos_irq.req.type =
			msm_host->pdata->pm_qos_data.irq_req_type;
	/*
	 * Non-core-affine request types follow the IRQ; core-affine types
	 * are pinned to the CPU given in DT.
	 * NOTE(review): PM_QOS_REQ_ALL_CORES falling into the cpumask_copy
	 * branch looks intentional but is worth confirming against the
	 * pm_qos request-type semantics.
	 */
	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
		set_affine_irq(msm_host, host);
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	msm_host->pm_qos_irq.enabled = true;

	/* sysfs */
	msm_host->pm_qos_irq.enable_attr.show =
		sdhci_msm_pm_qos_irq_enable_show;
	msm_host->pm_qos_irq.enable_attr.store =
		sdhci_msm_pm_qos_irq_enable_store;
	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
	msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(&msm_host->pdev->dev,
		&msm_host->pm_qos_irq.enable_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
			__func__, ret);

	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
	msm_host->pm_qos_irq.status_attr.store = NULL;
	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
	msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
	ret = device_create_file(&msm_host->pdev->dev,
			&msm_host->pm_qos_irq.status_attr);
	if (ret)
		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
			__func__, ret);
}
3306
3307static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
3308 struct device_attribute *attr, char *buf)
3309{
3310 struct sdhci_host *host = dev_get_drvdata(dev);
3311 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3312 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3313 struct sdhci_msm_pm_qos_group *group;
3314 int i;
3315 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3316 int offset = 0;
3317
3318 for (i = 0; i < nr_groups; i++) {
3319 group = &msm_host->pm_qos[i];
3320 offset += snprintf(&buf[offset], PAGE_SIZE,
3321 "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
3322 i, group->req.cpus_affine.bits[0],
3323 msm_host->pm_qos_group_enable,
3324 atomic_read(&group->counter),
3325 group->latency);
3326 }
3327
3328 return offset;
3329}
3330
3331static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
3332 struct device_attribute *attr, char *buf)
3333{
3334 struct sdhci_host *host = dev_get_drvdata(dev);
3335 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3336 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3337
3338 return snprintf(buf, PAGE_SIZE, "%s\n",
3339 msm_host->pm_qos_group_enable ? "enabled" : "disabled");
3340}
3341
3342static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
3343 struct device_attribute *attr, const char *buf, size_t count)
3344{
3345 struct sdhci_host *host = dev_get_drvdata(dev);
3346 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3347 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3348 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3349 uint32_t value;
3350 bool enable;
3351 int ret;
3352 int i;
3353
3354 ret = kstrtou32(buf, 0, &value);
3355 if (ret)
3356 goto out;
3357 enable = !!value;
3358
3359 if (enable == msm_host->pm_qos_group_enable)
3360 goto out;
3361
3362 msm_host->pm_qos_group_enable = enable;
3363 if (!enable) {
3364 for (i = 0; i < nr_groups; i++) {
3365 cancel_work_sync(&msm_host->pm_qos[i].unvote_work);
3366 atomic_set(&msm_host->pm_qos[i].counter, 0);
3367 msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
3368 pm_qos_update_request(&msm_host->pm_qos[i].req,
3369 msm_host->pm_qos[i].latency);
3370 }
3371 }
3372
3373out:
3374 return count;
Gilad Broner44445992015-09-29 16:05:39 +03003375}
3376
3377static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
3378{
3379 int i;
3380 struct sdhci_msm_cpu_group_map *map =
3381 &msm_host->pdata->pm_qos_data.cpu_group_map;
3382
3383 if (cpu < 0)
3384 goto not_found;
3385
3386 for (i = 0; i < map->nr_groups; i++)
3387 if (cpumask_test_cpu(cpu, &map->mask[i]))
3388 return i;
3389
3390not_found:
3391 return -EINVAL;
3392}
3393
/*
 * sdhci_msm_pm_qos_cpu_vote - take a PM QoS vote for the group containing
 * @cpu, using the latency of the current power policy from @latency.
 * Reference counted per group: if the request already carries the right
 * latency and this is not the group's first vote, nothing is written.
 * Otherwise a pending async unvote is cancelled and the request updated.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* A queued async unvote must not undo the vote we are taking */
	cancel_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}
3419
3420static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
3421{
3422 struct sdhci_msm_pm_qos_group *group =
3423 container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);
3424
3425 if (atomic_read(&group->counter))
3426 return;
3427
3428 group->latency = PM_QOS_DEFAULT_VALUE;
3429 pm_qos_update_request(&group->req, group->latency);
3430}
3431
Gilad Broner07d92eb2015-09-29 16:57:21 +03003432bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
Gilad Broner44445992015-09-29 16:05:39 +03003433{
3434 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3435 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3436 int group = sdhci_msm_get_cpu_group(msm_host, cpu);
3437
3438 if (!msm_host->pm_qos_group_enable || group < 0 ||
3439 atomic_dec_return(&msm_host->pm_qos[group].counter))
Gilad Broner07d92eb2015-09-29 16:57:21 +03003440 return false;
Gilad Broner44445992015-09-29 16:05:39 +03003441
3442 if (async) {
3443 schedule_work(&msm_host->pm_qos[group].unvote_work);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003444 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003445 }
3446
3447 msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
3448 pm_qos_update_request(&msm_host->pm_qos[group].req,
3449 msm_host->pm_qos[group].latency);
Gilad Broner07d92eb2015-09-29 16:57:21 +03003450 return true;
Gilad Broner44445992015-09-29 16:05:39 +03003451}
3452
3453void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
3454 struct sdhci_msm_pm_qos_latency *latency)
3455{
3456 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3457 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3458 int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
3459 struct sdhci_msm_pm_qos_group *group;
3460 int i;
Gilad Broner68c54562015-09-20 11:59:46 +03003461 int ret;
Gilad Broner44445992015-09-29 16:05:39 +03003462
3463 if (msm_host->pm_qos_group_enable)
3464 return;
3465
3466 msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
3467 GFP_KERNEL);
3468 if (!msm_host->pm_qos)
3469 return;
3470
3471 for (i = 0; i < nr_groups; i++) {
3472 group = &msm_host->pm_qos[i];
3473 INIT_WORK(&group->unvote_work,
3474 sdhci_msm_pm_qos_cpu_unvote_work);
3475 atomic_set(&group->counter, 0);
3476 group->req.type = PM_QOS_REQ_AFFINE_CORES;
3477 cpumask_copy(&group->req.cpus_affine,
3478 &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
3479 /* For initialization phase, set the performance mode latency */
3480 group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
3481 pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
3482 group->latency);
3483 pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
3484 __func__, i,
3485 group->req.cpus_affine.bits[0],
3486 group->latency,
3487 &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
3488 }
Gilad Broner07d92eb2015-09-29 16:57:21 +03003489 msm_host->pm_qos_prev_cpu = -1;
Gilad Broner44445992015-09-29 16:05:39 +03003490 msm_host->pm_qos_group_enable = true;
Gilad Broner68c54562015-09-20 11:59:46 +03003491
3492 /* sysfs */
3493 msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
3494 msm_host->pm_qos_group_status_attr.store = NULL;
3495 sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
3496 msm_host->pm_qos_group_status_attr.attr.name =
3497 "pm_qos_cpu_groups_status";
3498 msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
3499 ret = device_create_file(&msm_host->pdev->dev,
3500 &msm_host->pm_qos_group_status_attr);
3501 if (ret)
3502 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
3503 __func__, ret);
3504 msm_host->pm_qos_group_enable_attr.show =
3505 sdhci_msm_pm_qos_group_enable_show;
3506 msm_host->pm_qos_group_enable_attr.store =
3507 sdhci_msm_pm_qos_group_enable_store;
3508 sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
3509 msm_host->pm_qos_group_enable_attr.attr.name =
3510 "pm_qos_cpu_groups_enable";
3511 msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
3512 ret = device_create_file(&msm_host->pdev->dev,
3513 &msm_host->pm_qos_group_enable_attr);
3514 if (ret)
3515 dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
3516 __func__, ret);
Gilad Broner44445992015-09-29 16:05:39 +03003517}
3518
/*
 * sdhci_msm_pre_req() - PM QoS voting performed before issuing a request.
 *
 * Votes for the IRQ PM QoS request and for the CPU group of the CPU this
 * is currently running on. If the current CPU belongs to a different
 * group than the one voted for by the previous request, the old group's
 * vote is dropped (synchronously) before voting for the new one.
 */
static void sdhci_msm_pre_req(struct sdhci_host *host,
		struct mmc_request *mmc_req)
{
	int cpu;
	int group;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int prev_group = sdhci_msm_get_cpu_group(msm_host,
			msm_host->pm_qos_prev_cpu);

	sdhci_msm_pm_qos_irq_vote(host);

	/*
	 * Snapshot of the current CPU; the task may migrate right after
	 * put_cpu(), but the value is only used to choose a QoS group.
	 */
	cpu = get_cpu();
	put_cpu();
	group = sdhci_msm_get_cpu_group(msm_host, cpu);
	if (group < 0)
		return;

	/* Group changed: release the vote held for the previous group. */
	if (group != prev_group && prev_group >= 0) {
		sdhci_msm_pm_qos_cpu_unvote(host,
				msm_host->pm_qos_prev_cpu, false);
		prev_group = -1; /* make sure to vote for new group */
	}

	if (prev_group < 0) {
		sdhci_msm_pm_qos_cpu_vote(host,
				msm_host->pdata->pm_qos_data.latency, cpu);
		msm_host->pm_qos_prev_cpu = cpu;
	}
}
3549
3550static void sdhci_msm_post_req(struct sdhci_host *host,
3551 struct mmc_request *mmc_req)
3552{
3553 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3554 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3555
3556 sdhci_msm_pm_qos_irq_unvote(host, false);
3557
3558 if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
3559 msm_host->pm_qos_prev_cpu = -1;
3560}
3561
3562static void sdhci_msm_init(struct sdhci_host *host)
3563{
3564 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3565 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3566
3567 sdhci_msm_pm_qos_irq_init(host);
3568
3569 if (msm_host->pdata->pm_qos_data.legacy_valid)
3570 sdhci_msm_pm_qos_cpu_init(host,
3571 msm_host->pdata->pm_qos_data.latency);
3572}
3573
/*
 * MSM-specific host operations plugged into the core sdhci driver.
 * Covers clocking, tuning, power handling, enhanced strobe, vendor
 * register dumps, and the PM QoS pre/post request hooks above.
 */
static struct sdhci_ops sdhci_msm_ops = {
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.check_power_status = sdhci_msm_check_power_status,
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.enhanced_strobe = sdhci_msm_enhanced_strobe,
	.toggle_cdr = sdhci_msm_toggle_cdr,
	.get_max_segments = sdhci_msm_max_segs,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
	.enable_controller_clock = sdhci_msm_enable_controller_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
	.detect = sdhci_msm_detect,
	.reset_workaround = sdhci_msm_reset_workaround,
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
};
3597
/*
 * sdhci_set_default_hw_caps() - fix up the advertised capabilities and
 * enable version-specific quirks/workarounds based on the SDCC core's
 * major/minor version read from CORE_MCI_VERSION.
 *
 * The adjusted capability word is written to
 * CORE_VENDOR_SPEC_CAPABILITIES0 and cached in msm_host->caps_0.
 */
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
				struct sdhci_host *host)
{
	u32 version, caps = 0;
	u16 minor;
	u8 major;
	u32 val;

	/* Decode the controller core version into major/minor. */
	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	major = (version & CORE_VERSION_MAJOR_MASK) >>
			CORE_VERSION_MAJOR_SHIFT;
	minor = version & CORE_VERSION_TARGET_MASK;

	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * Starting with SDCC 5 controller (core major version = 1)
	 * controller won't advertise 3.0v, 1.8v and 8-bit features
	 * except for some targets.
	 */
	if (major >= 1 && minor != 0x11 && minor != 0x12) {
		struct sdhci_msm_reg_data *vdd_io_reg;
		/*
		 * Enable 1.8V support capability on controllers that
		 * support dual voltage
		 */
		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
			caps |= CORE_3_0V_SUPPORT;
		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
			caps |= CORE_8_BIT_SUPPORT;
	}

	/*
	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
	 * on 8992 (minor 0x3e) as a workaround to reset for data stuck issue.
	 */
	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
		val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
		writel_relaxed((val | CORE_ONE_MID_EN),
			host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
	}
	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if ((major == 1) && (minor < 0x34))
		msm_host->use_cdclp533 = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x42 and later
	 * will require additional steps when resetting DLL.
	 * It also supports HS400 enhanced strobe mode.
	 */
	if ((major == 1) && (minor >= 0x42)) {
		msm_host->use_updated_dll_reset = true;
		msm_host->enhanced_strobe = true;
	}

	/*
	 * SDCC 5 controller with major version 1 and minor version 0x42,
	 * 0x46 and 0x49 currently uses 14lpp tech DLL whose internal
	 * gating cannot guarantee MCLK timing requirement i.e.
	 * when MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming.
	 */
	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
			(minor == 0x49)))
		msm_host->use_14lpp_dll = true;

	/* Fake 3.0V support for SDIO devices which requires such voltage */
	if (msm_host->pdata->core_3_0v_support) {
		caps |= CORE_3_0V_SUPPORT;
			writel_relaxed(
			(readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
			caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	if ((major == 1) && (minor >= 0x49))
		msm_host->rclk_delay_fix = true;
	/*
	 * Mask 64-bit support for controller with 32-bit address bus so that
	 * smaller descriptor size will be used and improve memory consumption.
	 */
	if (!msm_host->pdata->largeaddressbus)
		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;

	writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
	/* keep track of the value in SDHCI_CAPABILITIES */
	msm_host->caps_0 = caps;
}
3693
#ifdef CONFIG_MMC_CQ_HCI
/*
 * sdhci_msm_cmdq_init() - probe the command-queue (CQ) engine.
 *
 * On success MMC_CAP2_CMD_QUEUE is advertised; on failure only a debug
 * message is printed, cq_host is cleared, and the host falls back to the
 * legacy (non-CQ) request path — probe itself is not failed.
 */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	host->cq_host = cmdq_pltfm_init(pdev);
	if (IS_ERR(host->cq_host)) {
		dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
			PTR_ERR(host->cq_host));
		host->cq_host = NULL;
	} else {
		msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
	}
}
#else
/* Stub used when command-queue support is not compiled in. */
static void sdhci_msm_cmdq_init(struct sdhci_host *host,
				struct platform_device *pdev)
{

}
#endif
3717
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003718static bool sdhci_msm_is_bootdevice(struct device *dev)
3719{
3720 if (strnstr(saved_command_line, "androidboot.bootdevice=",
3721 strlen(saved_command_line))) {
3722 char search_string[50];
3723
3724 snprintf(search_string, ARRAY_SIZE(search_string),
3725 "androidboot.bootdevice=%s", dev_name(dev));
3726 if (strnstr(saved_command_line, search_string,
3727 strlen(saved_command_line)))
3728 return true;
3729 else
3730 return false;
3731 }
3732
3733 /*
3734 * "androidboot.bootdevice=" argument is not present then
3735 * return true as we don't know the boot device anyways.
3736 */
3737 return true;
3738}
3739
Asutosh Das0ef24812012-12-18 16:14:02 +05303740static int sdhci_msm_probe(struct platform_device *pdev)
3741{
3742 struct sdhci_host *host;
3743 struct sdhci_pltfm_host *pltfm_host;
3744 struct sdhci_msm_host *msm_host;
3745 struct resource *core_memres = NULL;
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02003746 int ret = 0, dead = 0;
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003747 u16 host_version;
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003748 u32 irq_status, irq_ctl;
Sahitya Tummala079ed852015-10-29 20:18:45 +05303749 struct resource *tlmm_memres = NULL;
3750 void __iomem *tlmm_mem;
Asutosh Das0ef24812012-12-18 16:14:02 +05303751
3752 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
3753 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
3754 GFP_KERNEL);
3755 if (!msm_host) {
3756 ret = -ENOMEM;
3757 goto out;
3758 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303759
3760 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
3761 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
3762 if (IS_ERR(host)) {
3763 ret = PTR_ERR(host);
3764 goto out;
3765 }
3766
3767 pltfm_host = sdhci_priv(host);
3768 pltfm_host->priv = msm_host;
3769 msm_host->mmc = host->mmc;
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05303770 msm_host->pdev = pdev;
Asutosh Das0ef24812012-12-18 16:14:02 +05303771
3772 /* Extract platform data */
3773 if (pdev->dev.of_node) {
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003774 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
3775 if (ret < 0) {
3776 dev_err(&pdev->dev, "Failed to get slot index %d\n",
3777 ret);
3778 goto pltfm_free;
3779 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003780
3781 /* skip the probe if eMMC isn't a boot device */
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003782 if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
3783 ret = -ENODEV;
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003784 goto pltfm_free;
Venkat Gopalakrishnanb5dd7762015-09-10 12:25:27 -07003785 }
Subhash Jadavaniebcd00d2015-07-09 17:28:42 -07003786
Venkat Gopalakrishnan270580a2013-03-11 12:17:57 -07003787 if (disable_slots & (1 << (ret - 1))) {
3788 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
3789 ret);
3790 ret = -ENODEV;
3791 goto pltfm_free;
3792 }
3793
Venkat Gopalakrishnan976e8cb2015-10-23 16:46:29 -07003794 if (ret <= 2) {
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003795 sdhci_slot[ret-1] = msm_host;
Venkat Gopalakrishnan976e8cb2015-10-23 16:46:29 -07003796 host->slot_no = ret;
3797 }
Venkat Gopalakrishnan095ad972015-09-30 18:46:18 -07003798
Dov Levenglickc9033ab2015-03-10 16:00:56 +02003799 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
3800 msm_host);
Asutosh Das0ef24812012-12-18 16:14:02 +05303801 if (!msm_host->pdata) {
3802 dev_err(&pdev->dev, "DT parsing error\n");
3803 goto pltfm_free;
3804 }
3805 } else {
3806 dev_err(&pdev->dev, "No device tree node\n");
3807 goto pltfm_free;
3808 }
3809
3810 /* Setup Clocks */
3811
3812 /* Setup SDCC bus voter clock. */
3813 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
3814 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
3815 /* Vote for max. clk rate for max. performance */
3816 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
3817 if (ret)
3818 goto pltfm_free;
3819 ret = clk_prepare_enable(msm_host->bus_clk);
3820 if (ret)
3821 goto pltfm_free;
3822 }
3823
3824 /* Setup main peripheral bus clock */
3825 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
3826 if (!IS_ERR(msm_host->pclk)) {
3827 ret = clk_prepare_enable(msm_host->pclk);
3828 if (ret)
3829 goto bus_clk_disable;
3830 }
Asutosh Das9d7ee2f2013-11-08 12:33:47 +05303831 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das0ef24812012-12-18 16:14:02 +05303832
3833 /* Setup SDC MMC clock */
3834 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
3835 if (IS_ERR(msm_host->clk)) {
3836 ret = PTR_ERR(msm_host->clk);
3837 goto pclk_disable;
3838 }
3839
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303840 /* Set to the minimum supported clock frequency */
3841 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
3842 if (ret) {
3843 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303844 goto pclk_disable;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303845 }
Sahitya Tummala020ede0d2013-06-07 13:03:07 +05303846 ret = clk_prepare_enable(msm_host->clk);
3847 if (ret)
3848 goto pclk_disable;
3849
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303850 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05303851 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303852
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003853 /* Setup CDC calibration fixed feedback clock */
3854 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
3855 if (!IS_ERR(msm_host->ff_clk)) {
3856 ret = clk_prepare_enable(msm_host->ff_clk);
3857 if (ret)
3858 goto clk_disable;
3859 }
3860
3861 /* Setup CDC calibration sleep clock */
3862 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
3863 if (!IS_ERR(msm_host->sleep_clk)) {
3864 ret = clk_prepare_enable(msm_host->sleep_clk);
3865 if (ret)
3866 goto ff_clk_disable;
3867 }
3868
Venkat Gopalakrishnan9e069632013-06-12 11:16:37 -07003869 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
3870
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303871 ret = sdhci_msm_bus_register(msm_host, pdev);
3872 if (ret)
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003873 goto sleep_clk_disable;
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303874
3875 if (msm_host->msm_bus_vote.client_handle)
3876 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
3877 sdhci_msm_bus_work);
3878 sdhci_msm_bus_voting(host, 1);
3879
Asutosh Das0ef24812012-12-18 16:14:02 +05303880 /* Setup regulators */
3881 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
3882 if (ret) {
3883 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05303884 goto bus_unregister;
Asutosh Das0ef24812012-12-18 16:14:02 +05303885 }
3886
3887 /* Reset the core and Enable SDHC mode */
3888 core_memres = platform_get_resource_byname(pdev,
3889 IORESOURCE_MEM, "core_mem");
Asutosh Das890bdee2014-08-08 23:01:42 +05303890 if (!core_memres) {
3891 dev_err(&pdev->dev, "Failed to get iomem resource\n");
3892 goto vreg_deinit;
3893 }
Asutosh Das0ef24812012-12-18 16:14:02 +05303894 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
3895 resource_size(core_memres));
3896
3897 if (!msm_host->core_mem) {
3898 dev_err(&pdev->dev, "Failed to remap registers\n");
3899 ret = -ENOMEM;
3900 goto vreg_deinit;
3901 }
3902
Sahitya Tummala079ed852015-10-29 20:18:45 +05303903 tlmm_memres = platform_get_resource_byname(pdev,
3904 IORESOURCE_MEM, "tlmm_mem");
3905 if (tlmm_memres) {
3906 tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
3907 resource_size(tlmm_memres));
3908
3909 if (!tlmm_mem) {
3910 dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
3911 ret = -ENOMEM;
3912 goto vreg_deinit;
3913 }
3914 writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
3915 dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
3916 &tlmm_memres->start, readl_relaxed(tlmm_mem));
3917 }
3918
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303919 /*
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003920 * Reset the vendor spec register to power on reset state.
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303921 */
Venkat Gopalakrishnan17edb352015-06-24 14:46:49 -07003922 writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
3923 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala66b0fe32013-04-25 11:50:56 +05303924
Asutosh Das0ef24812012-12-18 16:14:02 +05303925 /* Set HC_MODE_EN bit in HC_MODE register */
3926 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
3927
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07003928 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
3929 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
3930 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
3931
Pratibhasagar Vd8acb7c2013-11-11 01:32:21 +05303932 sdhci_set_default_hw_caps(msm_host, host);
Krishna Konda46fd1432014-10-30 21:13:27 -07003933
3934 /*
3935 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
3936 * be used as required later on.
3937 */
3938 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
3939 CORE_IO_PAD_PWR_SWITCH_EN),
3940 host->ioaddr + CORE_VENDOR_SPEC);
Asutosh Das0ef24812012-12-18 16:14:02 +05303941 /*
Subhash Jadavani28137342013-05-14 17:46:43 +05303942 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
3943 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
3944 * interrupt in GIC (by registering the interrupt handler), we need to
3945 * ensure that any pending power irq interrupt status is acknowledged
3946 * otherwise power irq interrupt handler would be fired prematurely.
3947 */
3948 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
3949 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
3950 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
3951 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
3952 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
3953 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
3954 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
3955 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
Krishna Konda46fd1432014-10-30 21:13:27 -07003956
Subhash Jadavani28137342013-05-14 17:46:43 +05303957 /*
3958 * Ensure that above writes are propogated before interrupt enablement
3959 * in GIC.
3960 */
3961 mb();
3962
3963 /*
Asutosh Das0ef24812012-12-18 16:14:02 +05303964 * Following are the deviations from SDHC spec v3.0 -
3965 * 1. Card detection is handled using separate GPIO.
3966 * 2. Bus power control is handled by interacting with PMIC.
3967 */
3968 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
3969 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303970 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
Talel Shenhar4661c2a2015-06-24 15:49:30 +03003971 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
Sahitya Tummala22dd3362013-02-28 19:50:51 +05303972 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummala87d43942013-04-12 11:49:11 +05303973 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummala314162c2013-04-12 12:11:20 +05303974 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala7c9780d2013-04-12 11:59:25 +05303975 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das0ef24812012-12-18 16:14:02 +05303976
Sahitya Tummalaa5733ab52013-06-10 16:32:51 +05303977 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
3978 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
3979
Stephen Boyd8dce5c62013-04-24 14:19:46 -07003980 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07003981 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
3982 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
3983 SDHCI_VENDOR_VER_SHIFT));
3984 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
3985 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
3986 /*
3987 * Add 40us delay in interrupt handler when
3988 * operating at initialization frequency(400KHz).
3989 */
3990 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
3991 /*
3992 * Set Software Reset for DAT line in Software
3993 * Reset Register (Bit 2).
3994 */
3995 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
3996 }
3997
Asutosh Das214b9662013-06-13 14:27:42 +05303998 host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
3999
Venkat Gopalakrishnana58f91f2012-09-17 16:00:15 -07004000 /* Setup PWRCTL irq */
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004001 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
4002 if (msm_host->pwr_irq < 0) {
Asutosh Das0ef24812012-12-18 16:14:02 +05304003 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004004 msm_host->pwr_irq);
Asutosh Das0ef24812012-12-18 16:14:02 +05304005 goto vreg_deinit;
4006 }
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004007 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das0ef24812012-12-18 16:14:02 +05304008 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan7944a372012-09-11 16:13:31 -07004009 dev_name(&pdev->dev), host);
Asutosh Das0ef24812012-12-18 16:14:02 +05304010 if (ret) {
4011 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Konstantein Dorfmane0be02d2015-01-27 12:06:02 +02004012 msm_host->pwr_irq, ret);
Asutosh Das0ef24812012-12-18 16:14:02 +05304013 goto vreg_deinit;
4014 }
4015
4016 /* Enable pwr irq interrupts */
4017 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
4018
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304019#ifdef CONFIG_MMC_CLKGATE
4020 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
4021 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
4022#endif
4023
Asutosh Das0ef24812012-12-18 16:14:02 +05304024 /* Set host capabilities */
4025 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
4026 msm_host->mmc->caps |= msm_host->pdata->caps;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004027 msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
Ritesh Harjani34354722015-08-05 11:27:00 +05304028 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
Asutosh Das0ef24812012-12-18 16:14:02 +05304029 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004030 msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
4031 msm_host->mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
Venkat Gopalakrishnan97c16112014-12-16 18:26:25 -08004032 msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
Talel Shenhar3d1dbf32015-05-13 14:08:39 +03004033 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Pavan Anamula07d62ef2015-08-24 18:56:22 +05304034 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
Krishna Konda79fdcc22015-09-26 17:55:48 -07004035 msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
Maya Erezb62c9e32015-10-07 21:58:28 +03004036 msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
Asutosh Das0ef24812012-12-18 16:14:02 +05304037
4038 if (msm_host->pdata->nonremovable)
4039 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4040
Guoping Yuf7c91332014-08-20 16:56:18 +08004041 if (msm_host->pdata->nonhotplug)
4042 msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
4043
Sahitya Tummala1f52eaa2013-03-20 19:24:01 +05304044 init_completion(&msm_host->pwr_irq_completion);
4045
Sahitya Tummala581df132013-03-12 14:57:46 +05304046 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
Sahitya Tummala6ddabb42014-06-05 13:26:55 +05304047 /*
4048 * Set up the card detect GPIO in active configuration before
4049 * configuring it as an IRQ. Otherwise, it can be in some
4050 * weird/inconsistent state resulting in flood of interrupts.
4051 */
4052 sdhci_msm_setup_pins(msm_host->pdata, true);
4053
Sahitya Tummalaa3888f42015-02-05 14:05:27 +05304054 /*
4055 * This delay is needed for stabilizing the card detect GPIO
4056 * line after changing the pull configs.
4057 */
4058 usleep_range(10000, 10500);
Sahitya Tummala581df132013-03-12 14:57:46 +05304059 ret = mmc_gpio_request_cd(msm_host->mmc,
4060 msm_host->pdata->status_gpio, 0);
4061 if (ret) {
4062 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
4063 __func__, ret);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304064 goto vreg_deinit;
Sahitya Tummala581df132013-03-12 14:57:46 +05304065 }
4066 }
4067
Krishna Konda7feab352013-09-17 23:55:40 -07004068 if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
4069 (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
4070 host->dma_mask = DMA_BIT_MASK(64);
4071 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304072 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Krishna Konda7feab352013-09-17 23:55:40 -07004073 } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304074 host->dma_mask = DMA_BIT_MASK(32);
4075 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
Pavan Anamulaeeb3b142015-07-22 18:17:32 +05304076 mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
Sahitya Tummalaeaa21862013-03-20 19:34:59 +05304077 } else {
4078 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
4079 }
4080
Venkat Gopalakrishnand371f142015-05-29 17:49:46 -07004081 sdhci_msm_cmdq_init(host, pdev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304082 ret = sdhci_add_host(host);
4083 if (ret) {
4084 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala581df132013-03-12 14:57:46 +05304085 goto vreg_deinit;
Asutosh Das0ef24812012-12-18 16:14:02 +05304086 }
4087
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004088 pm_runtime_set_active(&pdev->dev);
4089 pm_runtime_enable(&pdev->dev);
4090 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
4091 pm_runtime_use_autosuspend(&pdev->dev);
4092
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304093 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
4094 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
4095 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
4096 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
4097 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
4098 ret = device_create_file(&pdev->dev,
4099 &msm_host->msm_bus_vote.max_bus_bw);
4100 if (ret)
4101 goto remove_host;
4102
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304103 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
4104 msm_host->polling.show = show_polling;
4105 msm_host->polling.store = store_polling;
4106 sysfs_attr_init(&msm_host->polling.attr);
4107 msm_host->polling.attr.name = "polling";
4108 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
4109 ret = device_create_file(&pdev->dev, &msm_host->polling);
4110 if (ret)
4111 goto remove_max_bus_bw_file;
4112 }
Asutosh Dase5e9ca62013-07-30 19:08:36 +05304113
4114 msm_host->auto_cmd21_attr.show = show_auto_cmd21;
4115 msm_host->auto_cmd21_attr.store = store_auto_cmd21;
4116 sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
4117 msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
4118 msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
4119 ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4120 if (ret) {
4121 pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
4122 mmc_hostname(host->mmc), __func__, ret);
4123 device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
4124 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304125 /* Successful initialization */
4126 goto out;
4127
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304128remove_max_bus_bw_file:
4129 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das0ef24812012-12-18 16:14:02 +05304130remove_host:
4131 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004132 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304133 sdhci_remove_host(host, dead);
4134vreg_deinit:
4135 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala79edc6a2013-05-23 15:59:22 +05304136bus_unregister:
4137 if (msm_host->msm_bus_vote.client_handle)
4138 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4139 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan7f691572013-06-23 17:36:46 -07004140sleep_clk_disable:
4141 if (!IS_ERR(msm_host->sleep_clk))
4142 clk_disable_unprepare(msm_host->sleep_clk);
4143ff_clk_disable:
4144 if (!IS_ERR(msm_host->ff_clk))
4145 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das0ef24812012-12-18 16:14:02 +05304146clk_disable:
4147 if (!IS_ERR(msm_host->clk))
4148 clk_disable_unprepare(msm_host->clk);
4149pclk_disable:
4150 if (!IS_ERR(msm_host->pclk))
4151 clk_disable_unprepare(msm_host->pclk);
4152bus_clk_disable:
4153 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
4154 clk_disable_unprepare(msm_host->bus_clk);
4155pltfm_free:
4156 sdhci_pltfm_free(pdev);
4157out:
4158 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
4159 return ret;
4160}
4161
4162static int sdhci_msm_remove(struct platform_device *pdev)
4163{
4164 struct sdhci_host *host = platform_get_drvdata(pdev);
4165 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4166 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4167 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
4168 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
4169 0xffffffff);
4170
4171 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala5c55b932013-06-20 14:00:18 +05304172 if (!gpio_is_valid(msm_host->pdata->status_gpio))
4173 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304174 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004175 pm_runtime_disable(&pdev->dev);
Asutosh Das0ef24812012-12-18 16:14:02 +05304176 sdhci_remove_host(host, dead);
4177 sdhci_pltfm_free(pdev);
Sahitya Tummala581df132013-03-12 14:57:46 +05304178
Asutosh Das0ef24812012-12-18 16:14:02 +05304179 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalaa7f3c572013-01-11 11:30:45 +05304180
Pratibhasagar V9acf2642013-11-21 21:07:21 +05304181 sdhci_msm_setup_pins(pdata, true);
4182 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala8a3e8182013-03-10 14:12:52 +05304183
4184 if (msm_host->msm_bus_vote.client_handle) {
4185 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4186 sdhci_msm_bus_unregister(msm_host);
4187 }
Asutosh Das0ef24812012-12-18 16:14:02 +05304188 return 0;
4189}
4190
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004191#ifdef CONFIG_PM
4192static int sdhci_msm_runtime_suspend(struct device *dev)
4193{
4194 struct sdhci_host *host = dev_get_drvdata(dev);
4195 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4196 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004197 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004198
Pavan Anamula45ef1372015-10-29 23:22:12 +05304199 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4200 if (mmc_enable_qca6574_settings(host->mmc->card) ||
4201 mmc_enable_qca9377_settings(host->mmc->card))
4202 return 0;
4203 }
4204
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004205 disable_irq(host->irq);
4206 disable_irq(msm_host->pwr_irq);
4207
4208 /*
4209 * Remove the vote immediately only if clocks are off in which
4210 * case we might have queued work to remove vote but it may not
4211 * be completed before runtime suspend or system suspend.
4212 */
4213 if (!atomic_read(&msm_host->clks_on)) {
4214 if (msm_host->msm_bus_vote.client_handle)
4215 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
4216 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004217 trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
4218 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004219
4220 return 0;
4221}
4222
4223static int sdhci_msm_runtime_resume(struct device *dev)
4224{
4225 struct sdhci_host *host = dev_get_drvdata(dev);
4226 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4227 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004228 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004229
Pavan Anamula45ef1372015-10-29 23:22:12 +05304230 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
4231 if (mmc_enable_qca6574_settings(host->mmc->card) ||
4232 mmc_enable_qca9377_settings(host->mmc->card))
4233 return 0;
4234 }
4235
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004236 enable_irq(msm_host->pwr_irq);
4237 enable_irq(host->irq);
4238
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004239 trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
4240 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004241 return 0;
4242}
4243
4244static int sdhci_msm_suspend(struct device *dev)
4245{
4246 struct sdhci_host *host = dev_get_drvdata(dev);
4247 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4248 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004249 int ret = 0;
4250 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004251
4252 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4253 (msm_host->mmc->slot.cd_irq >= 0))
4254 disable_irq(msm_host->mmc->slot.cd_irq);
4255
4256 if (pm_runtime_suspended(dev)) {
4257 pr_debug("%s: %s: already runtime suspended\n",
4258 mmc_hostname(host->mmc), __func__);
4259 goto out;
4260 }
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004261 ret = sdhci_msm_runtime_suspend(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004262out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004263 trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
4264 ktime_to_us(ktime_sub(ktime_get(), start)));
4265 return ret;
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004266}
4267
4268static int sdhci_msm_resume(struct device *dev)
4269{
4270 struct sdhci_host *host = dev_get_drvdata(dev);
4271 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4272 struct sdhci_msm_host *msm_host = pltfm_host->priv;
4273 int ret = 0;
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004274 ktime_t start = ktime_get();
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004275
4276 if (gpio_is_valid(msm_host->pdata->status_gpio) &&
4277 (msm_host->mmc->slot.cd_irq >= 0))
4278 enable_irq(msm_host->mmc->slot.cd_irq);
4279
4280 if (pm_runtime_suspended(dev)) {
4281 pr_debug("%s: %s: runtime suspended, defer system resume\n",
4282 mmc_hostname(host->mmc), __func__);
4283 goto out;
4284 }
4285
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004286 ret = sdhci_msm_runtime_resume(dev);
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004287out:
Konstantin Dorfmanddab0cc2015-02-25 16:23:50 +02004288 trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
4289 ktime_to_us(ktime_sub(ktime_get(), start)));
Konstantin Dorfman98377d32015-02-25 10:09:41 +02004290 return ret;
4291}
4292
4293static const struct dev_pm_ops sdhci_msm_pmops = {
4294 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
4295 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
4296 NULL)
4297};
4298
4299#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
4300
4301#else
4302#define SDHCI_MSM_PMOPS NULL
4303#endif
/* Devicetree match table: this driver binds to "qcom,sdhci-msm" nodes. */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm"},
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
4309
/* Platform-driver registration glue for the MSM SDHCI controller. */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		.name = "sdhci_msm",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_msm_dt_match,
		.pm = SDHCI_MSM_PMOPS,	/* NULL when !CONFIG_PM */
	},
};

/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");