/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/mmc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/cd-gpio.h>
#include <linux/dma-mapping.h>
#include <mach/gpio.h>
#include <mach/msm_bus.h>
#include <linux/iopoll.h>

#include "sdhci-pltfm.h"

#define SDHCI_VER_100		0x2B
#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define CORE_PWRCTL_STATUS	0xDC
#define CORE_PWRCTL_MASK	0xE0
#define CORE_PWRCTL_CLEAR	0xE4
#define CORE_PWRCTL_CTL		0xE8

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_DLL_CONFIG		0x100
#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_STATUS		0x108
#define CORE_DLL_LOCK		(1 << 7)

#define CORE_VENDOR_SPEC	0x10C
#define CORE_CLK_PWRSAVE	(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
#define CORE_HC_SELECT_IN_EN	(1 << 18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0	0x114
#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1	0x118

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_DDR_200_CFG		0x184
#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_MCI_DATA_CTRL	0x2C
#define CORE_MCI_DPSM_ENABLE	(1 << 0)

#define CORE_TESTBUS_CONFIG	0x0CC
#define CORE_TESTBUS_ENA	(1 << 3)
#define CORE_TESTBUS_SEL2	(1 << 4)

#define CORE_MCI_VERSION	0x050
#define CORE_VERSION_310	0x10000011

/*
 * Waiting until end of potential AHB access for data:
 * 16 AHB cycles (160ns for 100MHz and 320ns for 50MHz) +
 * delay on AHB (2us) = maximum 2.32us
 * Taking x10 times margin
 */
#define CORE_AHB_DATA_DELAY_US	23
/* Waiting until end of potential AHB access for descriptor:
 * Single (1 AHB cycle) + delay on AHB bus = max 2us
 * INCR4 (4 AHB cycles) + delay on AHB bus = max 2us
 * Single (1 AHB cycle) + delay on AHB bus = max 2us
 * Total 8 us delay with margin
 */
#define CORE_AHB_DESC_DELAY_US	8

#define CORE_SDCC_DEBUG_REG	0x124
#define CORE_DEBUG_REG_AHB_HTRANS	(3 << 12)

/* 8KB descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 13)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define INVALID_TUNING_PHASE	-1

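/*
 * Standard tuning block patterns defined by the SD3.0 / eMMC
 * specifications: the 64-byte pattern is used on 4-bit buses and the
 * 128-byte pattern on 8-bit buses. Data read back during tuning is
 * compared against these to decide whether a given DLL phase is good.
 */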
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

/* This structure keeps information per regulator */
struct sdhci_msm_reg_data {
	/* voltage regulator handle */
	struct regulator *reg;
	/* regulator name */
	const char *name;
	/* voltage level to be set */
	u32 low_vol_level;
	u32 high_vol_level;
	/* Load values for low power and high power mode */
	u32 lpm_uA;
	u32 hpm_uA;

	/* is this regulator enabled? */
	bool is_enabled;
	/* does this regulator need to be always on? */
	bool is_always_on;
	/* is low power mode setting required for this regulator? */
	bool lpm_sup;
	bool set_voltage_sup;
};

/*
 * This structure keeps information for all the
 * regulators required for a SDCC slot.
 */
struct sdhci_msm_slot_reg_data {
	/* keeps VDD/VCC regulator info */
	struct sdhci_msm_reg_data *vdd_data;
	/* keeps VDD IO regulator info */
	struct sdhci_msm_reg_data *vdd_io_data;
};

struct sdhci_msm_gpio {
	u32 no;
	const char *name;
	bool is_enabled;
};

struct sdhci_msm_gpio_data {
	struct sdhci_msm_gpio *gpio;
	u8 size;
};

struct sdhci_msm_pad_pull {
	enum msm_tlmm_pull_tgt no;
	u32 val;
};

struct sdhci_msm_pad_pull_data {
	struct sdhci_msm_pad_pull *on;
	struct sdhci_msm_pad_pull *off;
	u8 size;
};

struct sdhci_msm_pad_drv {
	enum msm_tlmm_hdrive_tgt no;
	u32 val;
};

struct sdhci_msm_pad_drv_data {
	struct sdhci_msm_pad_drv *on;
	struct sdhci_msm_pad_drv *off;
	u8 size;
};

struct sdhci_msm_pad_data {
	struct sdhci_msm_pad_pull_data *pull;
	struct sdhci_msm_pad_drv_data *drv;
};


struct sdhci_msm_pin_data {
	/*
	 * = 1 if controller pins are using gpios
	 * = 0 if controller has dedicated MSM pads
	 */
	u8 is_gpio;
	bool cfg_sts;
	struct sdhci_msm_gpio_data *gpio_data;
	struct sdhci_msm_pad_data *pad_data;
};

struct sdhci_msm_bus_voting_data {
	struct msm_bus_scale_pdata *bus_pdata;
	unsigned int *bw_vecs;
	unsigned int bw_vecs_size;
};

struct sdhci_msm_pltfm_data {
	/* Supported UHS-I Modes */
	u32 caps;

	/* More capabilities */
	u32 caps2;

	unsigned long mmc_bus_width;
	struct sdhci_msm_slot_reg_data *vreg_data;
	bool nonremovable;
	struct sdhci_msm_pin_data *pin_data;
	u32 cpu_dma_latency_us;
	int status_gpio; /* card detection GPIO that is configured as IRQ */
	struct sdhci_msm_bus_voting_data *voting_data;
	u32 *sup_clk_table;
	unsigned char sup_clk_cnt;
};

struct sdhci_msm_bus_vote {
	uint32_t client_handle;
	uint32_t curr_vote;
	int min_bw_vote;
	int max_bw_vote;
	bool is_max_bw_needed;
	struct delayed_work vote_work;
	struct device_attribute max_bus_bw;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem; /* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *clk;	/* main SD/MMC bus clock */
	struct clk *pclk;	/* SDHC peripheral bus clock */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *ff_clk;	/* CDC calibration fixed feedback clock */
	struct clk *sleep_clk;	/* CDC calibration sleep clock */
	atomic_t clks_on;	/* Set if clocks are enabled */
	struct sdhci_msm_pltfm_data *pdata;
	struct mmc_host *mmc;
	struct sdhci_pltfm_data sdhci_msm_pdata;
	u32 curr_pwr_state;
	u32 curr_io_level;
	struct completion pwr_irq_completion;
	struct sdhci_msm_bus_vote msm_bus_vote;
	struct device_attribute polling;
	u32 clk_rate; /* Keeps track of current clock rate that is set */
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	atomic_t controller_clock;
};

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
			CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

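/*
 * Select one of the 16 DLL clock output phases: the phase value is
 * grey-coded and written to the CDR_SELEXT field of DLL_CONFIG while
 * CDR is temporarily disabled, then CK_OUT_EN is toggled and polled
 * before CDR is re-enabled.
 */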
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 point of that range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

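/*
 * Program the MCLK_FREQ field (bits 26:24) of DLL_CONFIG with the code
 * corresponding to the current host clock rate (112MHz up to 200MHz).
 */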
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + CORE_DLL_CONFIG);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock. So let's disable the PWRSAVE
	 * here and re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		CORE_DLL_LOCK)) {
		/* max. wait of 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

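/*
 * Calibrate the CDCLP533 (Calibrated Delay Circuit) block used in
 * HS400 mode: reset the DLL, restore the tuning phase saved during
 * HS200 tuning, program the CDC configuration registers, trigger the
 * HW calibration and poll for CALIBRATION_DONE.
 */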
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 wait_cnt;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	wait_cnt = 50;
	while (!(readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CALIBRATION_DONE)) {
		/* max. wait of 50us for CALIBRATION_DONE bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
			ret = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

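/*
 * Execute the CM DLL tuning sequence: for each of the 16 DLL phases,
 * issue the tuning command and compare the returned block against the
 * expected pattern, then select a phase at roughly 3/4 of the largest
 * window of working phases. For HS400, only the CDCLP533 calibration
 * is performed here once HS200 tuning has already been done.
 */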
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDCLP533 HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_cdclp533_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if (((opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
		(opcode == MMC_SEND_TUNING_BLOCK_HS200)) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;

		/* set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found good phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
				mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_gpio_data *curr;
	int i, ret = 0;

	curr = pdata->pin_data->gpio_data;
	for (i = 0; i < curr->size; i++) {
		if (!gpio_is_valid(curr->gpio[i].no)) {
			ret = -EINVAL;
			pr_err("%s: Invalid gpio = %d\n", __func__,
				curr->gpio[i].no);
			goto free_gpios;
		}
		if (enable) {
			ret = gpio_request(curr->gpio[i].no,
					curr->gpio[i].name);
			if (ret) {
				pr_err("%s: gpio_request(%d, %s) failed %d\n",
					__func__, curr->gpio[i].no,
					curr->gpio[i].name, ret);
				goto free_gpios;
			}
			curr->gpio[i].is_enabled = true;
		} else {
			gpio_free(curr->gpio[i].no);
			curr->gpio[i].is_enabled = false;
		}
	}
	return ret;

free_gpios:
	for (i--; i >= 0; i--) {
		gpio_free(curr->gpio[i].no);
		curr->gpio[i].is_enabled = false;
	}
	return ret;
}

static int sdhci_msm_setup_pad(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_pad_data *curr;
	int i;

	curr = pdata->pin_data->pad_data;
	for (i = 0; i < curr->drv->size; i++) {
		if (enable)
			msm_tlmm_set_hdrive(curr->drv->on[i].no,
				curr->drv->on[i].val);
		else
			msm_tlmm_set_hdrive(curr->drv->off[i].no,
				curr->drv->off[i].val);
	}

	for (i = 0; i < curr->pull->size; i++) {
		if (enable)
			msm_tlmm_set_pull(curr->pull->on[i].no,
				curr->pull->on[i].val);
		else
			msm_tlmm_set_pull(curr->pull->off[i].no,
				curr->pull->off[i].val);
	}

	return 0;
}

static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	int ret = 0;

	if (!pdata->pin_data || (pdata->pin_data->cfg_sts == enable))
		return 0;
	if (pdata->pin_data->is_gpio)
		ret = sdhci_msm_setup_gpio(pdata, enable);
	else
		ret = sdhci_msm_setup_pad(pdata, enable);

	if (!ret)
		pdata->pin_data->cfg_sts = enable;

	return ret;
}

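/*
 * Read a u32 array property from the device tree node into a
 * devm-allocated buffer; *len returns the number of elements read and
 * a non-zero 'size' enforces an upper bound on the array length.
 */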
static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
				u32 **out, int *len, u32 size)
{
	int ret = 0;
	struct device_node *np = dev->of_node;
	size_t sz;
	u32 *arr = NULL;

	if (!of_get_property(np, prop_name, len)) {
		ret = -EINVAL;
		goto out;
	}
	sz = *len = *len / sizeof(*arr);
	if (sz <= 0 || (size > 0 && (sz > size))) {
		dev_err(dev, "%s invalid size\n", prop_name);
		ret = -EINVAL;
		goto out;
	}

	arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
	if (!arr) {
		dev_err(dev, "%s failed allocating memory\n", prop_name);
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, prop_name, arr, sz);
	if (ret < 0) {
		dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
		goto out;
	}
	*out = arr;
out:
	if (ret)
		*len = 0;
	return ret;
}

#define MAX_PROP_SIZE 32
static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
		struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
{
	int len, ret = 0;
	const __be32 *prop;
	char prop_name[MAX_PROP_SIZE];
	struct sdhci_msm_reg_data *vreg;
	struct device_node *np = dev->of_node;

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
	if (!of_parse_phandle(np, prop_name, 0)) {
		dev_info(dev, "No vreg data found for %s\n", vreg_name);
		return ret;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg) {
		dev_err(dev, "No memory for vreg: %s\n", vreg_name);
		ret = -ENOMEM;
		return ret;
	}

	vreg->name = vreg_name;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-always-on", vreg_name);
	if (of_get_property(np, prop_name, NULL))
		vreg->is_always_on = true;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-lpm-sup", vreg_name);
	if (of_get_property(np, prop_name, NULL))
		vreg->lpm_sup = true;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-voltage-level", vreg_name);
	prop = of_get_property(np, prop_name, &len);
	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_warn(dev, "%s %s property\n",
			prop ? "invalid format" : "no", prop_name);
	} else {
		vreg->low_vol_level = be32_to_cpup(&prop[0]);
		vreg->high_vol_level = be32_to_cpup(&prop[1]);
	}

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-current-level", vreg_name);
	prop = of_get_property(np, prop_name, &len);
	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_warn(dev, "%s %s property\n",
			prop ? "invalid format" : "no", prop_name);
	} else {
		vreg->lpm_uA = be32_to_cpup(&prop[0]);
		vreg->hpm_uA = be32_to_cpup(&prop[1]);
	}

	*vreg_data = vreg;
	dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
		vreg->name, vreg->is_always_on ? "always_on," : "",
		vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
		vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);

	return ret;
}

/* GPIO/Pad data extraction */
static int sdhci_msm_dt_get_pad_pull_info(struct device *dev, int id,
		struct sdhci_msm_pad_pull_data **pad_pull_data)
{
	int ret = 0, base = 0, len, i;
	u32 *tmp;
	struct sdhci_msm_pad_pull_data *pull_data;
	struct sdhci_msm_pad_pull *pull;

	switch (id) {
	case 1:
		base = TLMM_PULL_SDC1_CLK;
		break;
	case 2:
		base = TLMM_PULL_SDC2_CLK;
		break;
	case 3:
		base = TLMM_PULL_SDC3_CLK;
		break;
	case 4:
		base = TLMM_PULL_SDC4_CLK;
		break;
	default:
		dev_err(dev, "%s: Invalid slot id\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	pull_data = devm_kzalloc(dev, sizeof(struct sdhci_msm_pad_pull_data),
			GFP_KERNEL);
	if (!pull_data) {
		dev_err(dev, "No memory for msm_mmc_pad_pull_data\n");
		ret = -ENOMEM;
		goto out;
	}
	pull_data->size = 4; /* array size for clk, cmd, data and rclk */

	/* Allocate on, off configs for clk, cmd, data and rclk */
	pull = devm_kzalloc(dev, 2 * pull_data->size *
			sizeof(struct sdhci_msm_pad_pull), GFP_KERNEL);
	if (!pull) {
		dev_err(dev, "No memory for msm_mmc_pad_pull\n");
		ret = -ENOMEM;
		goto out;
	}
	pull_data->on = pull;
	pull_data->off = pull + pull_data->size;

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-pull-on",
			&tmp, &len, pull_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		pull_data->on[i].no = base + i;
		pull_data->on[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
				i, pull_data->on[i].val);
	}

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-pull-off",
			&tmp, &len, pull_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		pull_data->off[i].no = base + i;
		pull_data->off[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
				i, pull_data->off[i].val);
	}

	*pad_pull_data = pull_data;
out:
	return ret;
}

static int sdhci_msm_dt_get_pad_drv_info(struct device *dev, int id,
		struct sdhci_msm_pad_drv_data **pad_drv_data)
{
	int ret = 0, base = 0, len, i;
	u32 *tmp;
	struct sdhci_msm_pad_drv_data *drv_data;
	struct sdhci_msm_pad_drv *drv;

	switch (id) {
	case 1:
		base = TLMM_HDRV_SDC1_CLK;
		break;
	case 2:
		base = TLMM_HDRV_SDC2_CLK;
		break;
	case 3:
		base = TLMM_HDRV_SDC3_CLK;
		break;
	case 4:
		base = TLMM_HDRV_SDC4_CLK;
		break;
	default:
		dev_err(dev, "%s: Invalid slot id\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	drv_data = devm_kzalloc(dev, sizeof(struct sdhci_msm_pad_drv_data),
			GFP_KERNEL);
	if (!drv_data) {
		dev_err(dev, "No memory for msm_mmc_pad_drv_data\n");
		ret = -ENOMEM;
		goto out;
	}
	drv_data->size = 3; /* array size for clk, cmd, data */

	/* Allocate on, off configs for clk, cmd, data */
	drv = devm_kzalloc(dev, 2 * drv_data->size *
			sizeof(struct sdhci_msm_pad_drv), GFP_KERNEL);
	if (!drv) {
		dev_err(dev, "No memory msm_mmc_pad_drv\n");
		ret = -ENOMEM;
		goto out;
	}
	drv_data->on = drv;
	drv_data->off = drv + drv_data->size;

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-drv-on",
			&tmp, &len, drv_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		drv_data->on[i].no = base + i;
		drv_data->on[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
				i, drv_data->on[i].val);
	}

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-drv-off",
			&tmp, &len, drv_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		drv_data->off[i].no = base + i;
		drv_data->off[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
				i, drv_data->off[i].val);
	}

	*pad_drv_data = drv_data;
out:
	return ret;
}

#define GPIO_NAME_MAX_LEN 32
static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
		struct sdhci_msm_pltfm_data *pdata)
{
	int ret = 0, id = 0, cnt, i;
	struct sdhci_msm_pin_data *pin_data;
	struct device_node *np = dev->of_node;

	pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
	if (!pin_data) {
		dev_err(dev, "No memory for pin_data\n");
		ret = -ENOMEM;
		goto out;
	}

	cnt = of_gpio_count(np);
	if (cnt > 0) {
		pin_data->is_gpio = true;
		pin_data->gpio_data = devm_kzalloc(dev,
				sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
		if (!pin_data->gpio_data) {
			dev_err(dev, "No memory for gpio_data\n");
			ret = -ENOMEM;
			goto out;
		}
		pin_data->gpio_data->size = cnt;
		pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
				sizeof(struct sdhci_msm_gpio), GFP_KERNEL);

		if (!pin_data->gpio_data->gpio) {
			dev_err(dev, "No memory for gpio\n");
			ret = -ENOMEM;
			goto out;
		}
		for (i = 0; i < cnt; i++) {
			const char *name = NULL;
			char result[GPIO_NAME_MAX_LEN];
			pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
			of_property_read_string_index(np,
					"qcom,gpio-names", i, &name);

			snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
					dev_name(dev), name ? name : "?");
			pin_data->gpio_data->gpio[i].name = result;
			dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
					pin_data->gpio_data->gpio[i].name,
					pin_data->gpio_data->gpio[i].no);
		}
	} else {
		pin_data->pad_data =
			devm_kzalloc(dev,
				sizeof(struct sdhci_msm_pad_data),
				GFP_KERNEL);
		if (!pin_data->pad_data) {
			dev_err(dev,
				"No memory for pin_data->pad_data\n");
			ret = -ENOMEM;
			goto out;
		}

		ret = of_alias_get_id(np, "sdhc");
		if (ret < 0) {
			dev_err(dev, "Failed to get slot index %d\n", ret);
			goto out;
		}
		id = ret;

		ret = sdhci_msm_dt_get_pad_pull_info(
			dev, id, &pin_data->pad_data->pull);
		if (ret)
			goto out;
		ret = sdhci_msm_dt_get_pad_drv_info(
			dev, id, &pin_data->pad_data->drv);
		if (ret)
			goto out;

	}
	pdata->pin_data = pin_data;
out:
	if (ret)
		dev_err(dev, "%s failed with err %d\n", __func__, ret);
	return ret;
}

/* Parse platform data */
static struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev)
{
	struct sdhci_msm_pltfm_data *pdata = NULL;
	struct device_node *np = dev->of_node;
	u32 bus_width = 0;
	u32 cpu_dma_latency;
	int len, i;
	int clk_table_len;
	u32 *clk_table = NULL;
	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "failed to allocate memory for platform data\n");
		goto out;
	}

	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;

	of_property_read_u32(np, "qcom,bus-width", &bus_width);
	if (bus_width == 8)
		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
	else if (bus_width == 4)
		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
	else {
		dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
		pdata->mmc_bus_width = 0;
	}

	if (!of_property_read_u32(np, "qcom,cpu-dma-latency-us",
				&cpu_dma_latency))
		pdata->cpu_dma_latency_us = cpu_dma_latency;

	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
			&clk_table, &clk_table_len, 0)) {
		dev_err(dev, "failed parsing supported clock rates\n");
		goto out;
	}
	if (!clk_table || !clk_table_len) {
		dev_err(dev, "Invalid clock table\n");
		goto out;
	}
	pdata->sup_clk_table = clk_table;
	pdata->sup_clk_cnt = clk_table_len;

	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
					sdhci_msm_slot_reg_data),
					GFP_KERNEL);
	if (!pdata->vreg_data) {
		dev_err(dev, "failed to allocate memory for vreg data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
					"vdd")) {
		dev_err(dev, "failed parsing vdd data\n");
		goto out;
	}
	if (sdhci_msm_dt_parse_vreg_info(dev,
					&pdata->vreg_data->vdd_io_data,
					"vdd-io")) {
		dev_err(dev, "failed parsing vdd-io data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
		dev_err(dev, "failed parsing gpio data\n");
		goto out;
	}

	len = of_property_count_strings(np, "qcom,bus-speed-mode");

	for (i = 0; i < len; i++) {
		const char *name = NULL;

		of_property_read_string_index(np,
			"qcom,bus-speed-mode", i, &name);
		if (!name)
			continue;

		if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
		else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
			pdata->caps |= MMC_CAP_1_8V_DDR
						| MMC_CAP_UHS_DDR50;
		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
			pdata->caps |= MMC_CAP_1_2V_DDR
						| MMC_CAP_UHS_DDR50;
	}

	if (of_get_property(np, "qcom,nonremovable", NULL))
		pdata->nonremovable = true;

	return pdata;
out:
	return NULL;
}

/* Returns required bandwidth in Bytes per Sec */
static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
					struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned int bw;

	bw = msm_host->clk_rate;
	/*
	 * For DDR mode, SDCC controller clock will be at
	 * the double rate than the actual clock that goes to card.
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		bw /= 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_1)
		bw /= 8;

	return bw;
}

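/*
 * Map a bandwidth requirement (in bytes/sec) to an index into the
 * bw_vecs table supplied through the device tree; the index is later
 * passed to msm_bus_scale_client_update_request() as the bus vote.
 */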
static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
					unsigned int bw)
{
	unsigned int *table = host->pdata->voting_data->bw_vecs;
	unsigned int size = host->pdata->voting_data->bw_vecs_size;
	int i;

	if (host->msm_bus_vote.is_max_bw_needed && bw)
		return host->msm_bus_vote.max_bw_vote;

	for (i = 0; i < size; i++) {
		if (bw <= table[i])
			break;
	}

	if (i && (i == size))
		i--;

	return i;
}

/*
 * This function must be called with host lock acquired.
 * Caller of this function should also ensure that msm bus client
 * handle is not null.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
						int vote,
						unsigned long flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		spin_unlock_irqrestore(&host->lock, flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}

/*
 * Internal work. Work to set 0 bandwidth for msm bus.
 */
static void sdhci_msm_bus_work(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host;
	struct sdhci_host *host;
	unsigned long flags;

	msm_host = container_of(work, struct sdhci_msm_host,
				msm_bus_vote.vote_work.work);
	host = platform_get_drvdata(msm_host->pdev);

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	spin_lock_irqsave(&host->lock, flags);
	/* don't vote for 0 bandwidth if any request is in progress */
	if (!host->mrq) {
		sdhci_msm_bus_set_vote(msm_host,
			msm_host->msm_bus_vote.min_bw_vote, flags);
	} else
		pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
			mmc_hostname(host->mmc), __func__);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	sdhci_msm_bus_set_vote(msm_host, vote, flags);
	spin_unlock_irqrestore(&host->lock, flags);
}

#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */

/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	if (msm_host->msm_bus_vote.min_bw_vote !=
				msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_nrt_wq,
				&msm_host->msm_bus_vote.vote_work,
				msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		devm_kfree(dev, data);
	}

out:
	return rc;
}

static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
{
	if (host->msm_bus_vote.client_handle)
		msm_bus_scale_unregister_client(
			host->msm_bus_vote.client_handle);
}

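/*
 * Vote for (enable) or drop (disable) SDCC bus bandwidth based on the
 * current ios settings. When dropping, the vote is removed immediately
 * if MMC clock gating is active, otherwise it is deferred through the
 * delayed vote work.
 */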
1639static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
1640{
1641 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1642 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1643 struct mmc_ios *ios = &host->mmc->ios;
1644 unsigned int bw;
1645
1646 if (!msm_host->msm_bus_vote.client_handle)
1647 return;
1648
1649 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05301650 if (enable) {
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301651 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05301652 } else {
1653 /*
1654 * If clock gating is enabled, then remove the vote
1655 * immediately because clocks will be disabled only
1656 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
1657 * additional delay is required to remove the bus vote.
1658 */
1659 if (host->mmc->clkgate_delay)
1660 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
1661 else
1662 sdhci_msm_bus_queue_work(host);
1663 }
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301664}
1665
Asutosh Das33a4ff52012-12-18 16:14:02 +05301666/* Regulator utility functions */
1667static int sdhci_msm_vreg_init_reg(struct device *dev,
1668 struct sdhci_msm_reg_data *vreg)
1669{
1670 int ret = 0;
1671
1672 /* Check whether the regulator is already initialized */
1673 if (vreg->reg)
1674 goto out;
1675
1676 /* Get the regulator handle */
1677 vreg->reg = devm_regulator_get(dev, vreg->name);
1678 if (IS_ERR(vreg->reg)) {
1679 ret = PTR_ERR(vreg->reg);
1680 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1681 __func__, vreg->name, ret);
1682 goto out;
1683 }
1684
Asutosh Das95afcad2013-06-28 15:03:44 +05301685 if (regulator_count_voltages(vreg->reg) > 0) {
1686 vreg->set_voltage_sup = true;
1687 /* sanity check */
1688 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1689 pr_err("%s: %s invalid constraints specified\n",
1690 __func__, vreg->name);
1691 ret = -EINVAL;
1692 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301693 }
1694
1695out:
1696 return ret;
1697}
1698
1699static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
1700{
1701 if (vreg->reg)
1702 devm_regulator_put(vreg->reg);
1703}
1704
1705static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
1706 *vreg, int uA_load)
1707{
1708 int ret = 0;
1709
1710 /*
1711 * regulators that do not support regulator_set_voltage also
1712 * do not support regulator_set_optimum_mode
1713 */
Asutosh Das95afcad2013-06-28 15:03:44 +05301714 if (vreg->set_voltage_sup) {
1715 ret = regulator_set_optimum_mode(vreg->reg, uA_load);
1716 if (ret < 0)
1717 pr_err("%s: regulator_set_optimum_mode(reg=%s,uA_load=%d) failed. ret=%d\n",
Asutosh Das33a4ff52012-12-18 16:14:02 +05301718 __func__, vreg->name, uA_load, ret);
1719 else
1720 /*
1721 * regulator_set_optimum_mode() can return a non-zero
1722 * value even on success.
1723 */
1724 ret = 0;
Asutosh Das95afcad2013-06-28 15:03:44 +05301725 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301726 return ret;
1727}
1728
1729static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
1730 int min_uV, int max_uV)
1731{
1732 int ret = 0;
Asutosh Das95afcad2013-06-28 15:03:44 +05301733 if (vreg->set_voltage_sup) {
1734 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
1735 if (ret) {
1736 pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das33a4ff52012-12-18 16:14:02 +05301737 __func__, vreg->name, min_uV, max_uV, ret);
1738 }
Asutosh Das95afcad2013-06-28 15:03:44 +05301739 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301740
1741 return ret;
1742}
1743
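/*
 * Regulator enable sequence: request the high power mode (HPM) load first,
 * program the operating voltage if the supply is not already enabled, then
 * enable the regulator itself and record the state in vreg->is_enabled.
 */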
1744static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
1745{
1746 int ret = 0;
1747
1748 /* Put regulator in HPM (high power mode) */
1749 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
1750 if (ret < 0)
1751 return ret;
1752
1753 if (!vreg->is_enabled) {
1754 /* Set voltage level */
1755 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
1756 vreg->high_vol_level);
1757 if (ret)
1758 return ret;
1759 }
1760 ret = regulator_enable(vreg->reg);
1761 if (ret) {
1762 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
1763 __func__, vreg->name, ret);
1764 return ret;
1765 }
1766 vreg->is_enabled = true;
1767 return ret;
1768}
1769
1770static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
1771{
1772 int ret = 0;
1773
1774 /* Never disable a regulator marked as always_on */
1775 if (vreg->is_enabled && !vreg->is_always_on) {
1776 ret = regulator_disable(vreg->reg);
1777 if (ret) {
1778 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
1779 __func__, vreg->name, ret);
1780 goto out;
1781 }
1782 vreg->is_enabled = false;
1783
1784 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
1785 if (ret < 0)
1786 goto out;
1787
1788 /* Set min. voltage level to 0 */
1789 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
1790 if (ret)
1791 goto out;
1792 } else if (vreg->is_enabled && vreg->is_always_on) {
1793 if (vreg->lpm_sup) {
1794 /* Put always_on regulator in LPM (low power mode) */
1795 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
1796 vreg->lpm_uA);
1797 if (ret < 0)
1798 goto out;
1799 }
1800 }
1801out:
1802 return ret;
1803}
1804
1805static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
1806 bool enable, bool is_init)
1807{
1808 int ret = 0, i;
1809 struct sdhci_msm_slot_reg_data *curr_slot;
1810 struct sdhci_msm_reg_data *vreg_table[2];
1811
1812 curr_slot = pdata->vreg_data;
1813 if (!curr_slot) {
1814 pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
1815 __func__);
1816 goto out;
1817 }
1818
1819 vreg_table[0] = curr_slot->vdd_data;
1820 vreg_table[1] = curr_slot->vdd_io_data;
1821
1822 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
1823 if (vreg_table[i]) {
1824 if (enable)
1825 ret = sdhci_msm_vreg_enable(vreg_table[i]);
1826 else
1827 ret = sdhci_msm_vreg_disable(vreg_table[i]);
1828 if (ret)
1829 goto out;
1830 }
1831 }
1832out:
1833 return ret;
1834}
1835
1836/*
1837 * Reset the vreg by ensuring it is off during probe. The enable
1838 * call is needed to balance the subsequent disable call.
1839 */
1840static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
1841{
1842 int ret;
1843
1844 ret = sdhci_msm_setup_vreg(pdata, 1, true);
1845 if (ret)
1846 return ret;
1847 ret = sdhci_msm_setup_vreg(pdata, 0, true);
1848 return ret;
1849}
1850
1851/* This init function should be called only once for each SDHC slot */
1852static int sdhci_msm_vreg_init(struct device *dev,
1853 struct sdhci_msm_pltfm_data *pdata,
1854 bool is_init)
1855{
1856 int ret = 0;
1857 struct sdhci_msm_slot_reg_data *curr_slot;
1858 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
1859
1860 curr_slot = pdata->vreg_data;
1861 if (!curr_slot)
1862 goto out;
1863
1864 curr_vdd_reg = curr_slot->vdd_data;
1865 curr_vdd_io_reg = curr_slot->vdd_io_data;
1866
1867 if (!is_init)
1868 /* Deregister all regulators from regulator framework */
1869 goto vdd_io_reg_deinit;
1870
1871 /*
1872 * Get the regulator handle from voltage regulator framework
1873 * and then try to set the voltage level for the regulator
1874 */
1875 if (curr_vdd_reg) {
1876 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
1877 if (ret)
1878 goto out;
1879 }
1880 if (curr_vdd_io_reg) {
1881 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
1882 if (ret)
1883 goto vdd_reg_deinit;
1884 }
1885 ret = sdhci_msm_vreg_reset(pdata);
1886 if (ret)
1887 dev_err(dev, "vreg reset failed (%d)\n", ret);
1888 goto out;
1889
1890vdd_io_reg_deinit:
1891 if (curr_vdd_io_reg)
1892 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
1893vdd_reg_deinit:
1894 if (curr_vdd_reg)
1895 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
1896out:
1897 return ret;
1898}
1899
1900
1901static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
1902 enum vdd_io_level level,
1903 unsigned int voltage_level)
1904{
1905 int ret = 0;
1906 int set_level;
1907 struct sdhci_msm_reg_data *vdd_io_reg;
1908
1909 if (!pdata->vreg_data)
1910 return ret;
1911
1912 vdd_io_reg = pdata->vreg_data->vdd_io_data;
1913 if (vdd_io_reg && vdd_io_reg->is_enabled) {
1914 switch (level) {
1915 case VDD_IO_LOW:
1916 set_level = vdd_io_reg->low_vol_level;
1917 break;
1918 case VDD_IO_HIGH:
1919 set_level = vdd_io_reg->high_vol_level;
1920 break;
1921 case VDD_IO_SET_LEVEL:
1922 set_level = voltage_level;
1923 break;
1924 default:
1925 pr_err("%s: invalid argument level = %d\n",
1926 __func__, level);
1927 ret = -EINVAL;
1928 return ret;
1929 }
1930 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
1931 set_level);
1932 }
1933 return ret;
1934}
1935
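/*
 * Power control IRQ handler. The controller raises this interrupt to
 * request a bus power or I/O voltage change: the pending request is read
 * from CORE_PWRCTL_STATUS, cleared via CORE_PWRCTL_CLEAR, serviced by
 * toggling the regulators/pins or the VDD-IO level, and then acknowledged
 * through CORE_PWRCTL_CTL. Finally pwr_irq_completion is signalled so
 * that sdhci_msm_check_power_status() can stop waiting.
 */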
1936static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1937{
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07001938 struct sdhci_host *host = (struct sdhci_host *)data;
1939 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1940 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301941 u8 irq_status = 0;
1942 u8 irq_ack = 0;
1943 int ret = 0;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301944 int pwr_state = 0, io_level = 0;
1945 unsigned long flags;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301946
1947 irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
1948 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
1949 mmc_hostname(msm_host->mmc), irq, irq_status);
1950
1951 /* Clear the interrupt */
1952 writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
1953 /*
1954 * SDHC has core_mem and hc_mem device memory regions and these
1955 * addresses do not fall within the same 1KB region. Hence, any update
1956 * to the core_mem address space requires an mb() to ensure it completes
1957 * before the next update to registers within hc_mem.
1958 */
1959 mb();
1960
1961 /* Handle BUS ON/OFF*/
1962 if (irq_status & CORE_PWRCTL_BUS_ON) {
1963 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301964 if (!ret) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05301965 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301966 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
1967 VDD_IO_HIGH, 0);
1968 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301969 if (ret)
1970 irq_ack |= CORE_PWRCTL_BUS_FAIL;
1971 else
1972 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301973
1974 pwr_state = REQ_BUS_ON;
1975 io_level = REQ_IO_HIGH;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301976 }
1977 if (irq_status & CORE_PWRCTL_BUS_OFF) {
1978 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301979 if (!ret) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05301980 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301981 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
1982 VDD_IO_LOW, 0);
1983 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301984 if (ret)
1985 irq_ack |= CORE_PWRCTL_BUS_FAIL;
1986 else
1987 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301988
1989 pwr_state = REQ_BUS_OFF;
1990 io_level = REQ_IO_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301991 }
1992 /* Handle IO LOW/HIGH */
1993 if (irq_status & CORE_PWRCTL_IO_LOW) {
1994 /* Switch voltage Low */
1995 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
1996 if (ret)
1997 irq_ack |= CORE_PWRCTL_IO_FAIL;
1998 else
1999 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302000
2001 io_level = REQ_IO_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302002 }
2003 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2004 /* Switch voltage High */
2005 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2006 if (ret)
2007 irq_ack |= CORE_PWRCTL_IO_FAIL;
2008 else
2009 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302010
2011 io_level = REQ_IO_HIGH;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302012 }
2013
2014 /* ACK status to the core */
2015 writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
2016 /*
2017 * SDHC has core_mem and hc_mem device memory regions and these
2018 * addresses do not fall within the same 1KB region. Hence, any update
2019 * to the core_mem address space requires an mb() to ensure it completes
2020 * before the next update to registers within hc_mem.
2021 */
2022 mb();
2023
Sahitya Tummala179e7382013-03-20 19:24:01 +05302024 if (io_level & REQ_IO_HIGH)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002025 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2026 ~CORE_IO_PAD_PWR_SWITCH),
2027 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala179e7382013-03-20 19:24:01 +05302028 else if (io_level & REQ_IO_LOW)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002029 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
2030 CORE_IO_PAD_PWR_SWITCH),
2031 host->ioaddr + CORE_VENDOR_SPEC);
2032 mb();
2033
Asutosh Das33a4ff52012-12-18 16:14:02 +05302034 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2035 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala179e7382013-03-20 19:24:01 +05302036 spin_lock_irqsave(&host->lock, flags);
2037 if (pwr_state)
2038 msm_host->curr_pwr_state = pwr_state;
2039 if (io_level)
2040 msm_host->curr_io_level = io_level;
2041 complete(&msm_host->pwr_irq_completion);
2042 spin_unlock_irqrestore(&host->lock, flags);
2043
Asutosh Das33a4ff52012-12-18 16:14:02 +05302044 return IRQ_HANDLED;
2045}
2046
2047/* This function returns the max. current supported by VDD rail in mA */
2048static unsigned int sdhci_msm_get_vreg_vdd_max_current(struct sdhci_msm_host
2049 *host)
2050{
2051 struct sdhci_msm_slot_reg_data *curr_slot = host->pdata->vreg_data;
2052 if (!curr_slot)
2053 return 0;
2054 if (curr_slot->vdd_data)
2055 return curr_slot->vdd_data->hpm_uA / 1000;
2056 else
2057 return 0;
2058}
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302059
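/*
 * "polling" sysfs attribute, exposed only when the slot has no valid
 * card-detect GPIO. Writing a non-zero value sets MMC_CAP_NEEDS_POLL and
 * triggers a rescan; writing zero clears the capability again. As a
 * usage sketch (the exact sysfs path is platform specific):
 *
 *	echo 1 > /sys/devices/.../polling
 */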
2060static ssize_t
2061show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2062{
2063 struct sdhci_host *host = dev_get_drvdata(dev);
2064 int poll;
2065 unsigned long flags;
2066
2067 spin_lock_irqsave(&host->lock, flags);
2068 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2069 spin_unlock_irqrestore(&host->lock, flags);
2070
2071 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2072}
2073
2074static ssize_t
2075store_polling(struct device *dev, struct device_attribute *attr,
2076 const char *buf, size_t count)
2077{
2078 struct sdhci_host *host = dev_get_drvdata(dev);
2079 u32 value;
2080 unsigned long flags;
2081
2082 if (!kstrtou32(buf, 0, &value)) {
2083 spin_lock_irqsave(&host->lock, flags);
2084 if (value) {
2085 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2086 mmc_detect_change(host->mmc, 0);
2087 } else {
2088 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2089 }
2090 spin_unlock_irqrestore(&host->lock, flags);
2091 }
2092 return count;
2093}
2094}
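
/*
 * "max_bus_bw" sysfs attribute. Writing a non-zero value sets
 * is_max_bw_needed, which is intended to make the bus-vote selection
 * request the maximum bandwidth regardless of the current transfer rate;
 * writing zero restores bandwidth-based voting.
 */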
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302095static ssize_t
2096show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2097 char *buf)
2098{
2099 struct sdhci_host *host = dev_get_drvdata(dev);
2100 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2101 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2102
2103 return snprintf(buf, PAGE_SIZE, "%u\n",
2104 msm_host->msm_bus_vote.is_max_bw_needed);
2105}
2106
2107static ssize_t
2108store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2109 const char *buf, size_t count)
2110{
2111 struct sdhci_host *host = dev_get_drvdata(dev);
2112 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2113 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2114 uint32_t value;
2115 unsigned long flags;
2116
2117 if (!kstrtou32(buf, 0, &value)) {
2118 spin_lock_irqsave(&host->lock, flags);
2119 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2120 spin_unlock_irqrestore(&host->lock, flags);
2121 }
2122 return count;
2123}
Asutosh Das33a4ff52012-12-18 16:14:02 +05302124}
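
/*
 * Invoked through the check_power_status host op. Blocks until
 * sdhci_msm_pwr_irq() has serviced a bus-power or I/O-voltage request
 * matching req_type, unless such a request has already been handled,
 * in which case the completion is re-initialized instead.
 */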
Sahitya Tummala179e7382013-03-20 19:24:01 +05302125static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das33a4ff52012-12-18 16:14:02 +05302126{
2127 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2128 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302129 unsigned long flags;
2130 bool done = false;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302131
Sahitya Tummala179e7382013-03-20 19:24:01 +05302132 spin_lock_irqsave(&host->lock, flags);
2133 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2134 mmc_hostname(host->mmc), __func__, req_type,
2135 msm_host->curr_pwr_state, msm_host->curr_io_level);
2136 if ((req_type & msm_host->curr_pwr_state) ||
2137 (req_type & msm_host->curr_io_level))
2138 done = true;
2139 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302140
Sahitya Tummala179e7382013-03-20 19:24:01 +05302141 /*
2142 * This is needed here to handle the case where the IRQ gets
2143 * triggered even before this function is called, so that the
2144 * completion's done counter gets reset. Otherwise, the next call
2145 * to wait_for_completion() returns immediately without actually
2146 * waiting for the IRQ to be handled.
2147 */
2148 if (done)
2149 init_completion(&msm_host->pwr_irq_completion);
2150 else
2151 wait_for_completion(&msm_host->pwr_irq_completion);
2152
2153 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2154 __func__, req_type);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302155}
2156
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002157static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2158{
2159 if (enable)
2160 writel_relaxed((readl_relaxed(host->ioaddr +
2161 CORE_DLL_CONFIG) | CORE_CDR_EN),
2162 host->ioaddr + CORE_DLL_CONFIG);
2163 else
2164 writel_relaxed((readl_relaxed(host->ioaddr +
2165 CORE_DLL_CONFIG) & ~CORE_CDR_EN),
2166 host->ioaddr + CORE_DLL_CONFIG);
2167}
2168
Asutosh Das3781bd82013-01-10 21:11:04 +05302169static unsigned int sdhci_msm_max_segs(void)
2170{
2171 return SDHCI_MSM_MAX_SEGMENTS;
2172}
2173
Sahitya Tummala00240122013-02-28 19:50:51 +05302174static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302175{
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302176 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2177 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302178
Sahitya Tummala00240122013-02-28 19:50:51 +05302179 return msm_host->pdata->sup_clk_table[0];
2180}
2181
2182static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2183{
2184 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2185 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2186 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2187
2188 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2189}
2190
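/*
 * Return the highest supported clock rate that does not exceed req_clk,
 * falling back to the minimum supported rate when req_clk is below it.
 * For example, with an (illustrative) table of 400KHz/25MHz/50MHz/100MHz,
 * a request for 52MHz selects 50MHz.
 */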
2191static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2192 u32 req_clk)
2193{
2194 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2195 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2196 unsigned int sel_clk = -1;
2197 unsigned char cnt;
2198
2199 if (req_clk < sdhci_msm_get_min_clock(host)) {
2200 sel_clk = sdhci_msm_get_min_clock(host);
2201 return sel_clk;
2202 }
2203
2204 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2205 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2206 break;
2207 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2208 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2209 break;
2210 } else {
2211 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2212 }
2213 }
2214 return sel_clk;
2215}
2216
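/*
 * Enable just enough to make the controller registers accessible: cast
 * the bus vote and turn on the iface (pclk) and core clocks, tracking
 * the result in the controller_clock atomic so repeated calls are
 * no-ops. The bus, ff and sleep clocks are brought up separately in
 * sdhci_msm_prepare_clocks().
 */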
Asutosh Das6c0804b2013-11-08 12:33:47 +05302217static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2218{
2219 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2220 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2221 int rc = 0;
2222
2223 if (atomic_read(&msm_host->controller_clock))
2224 return 0;
2225
2226 sdhci_msm_bus_voting(host, 1);
2227
2228 if (!IS_ERR(msm_host->pclk)) {
2229 rc = clk_prepare_enable(msm_host->pclk);
2230 if (rc) {
2231 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2232 mmc_hostname(host->mmc), __func__, rc);
2233 goto remove_vote;
2234 }
2235 }
2236
2237 rc = clk_prepare_enable(msm_host->clk);
2238 if (rc) {
2239 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2240 mmc_hostname(host->mmc), __func__, rc);
2241 goto disable_pclk;
2242 }
2243
2244 atomic_set(&msm_host->controller_clock, 1);
2245 pr_debug("%s: %s: enabled controller clock\n",
2246 mmc_hostname(host->mmc), __func__);
2247 goto out;
2248
2249disable_pclk:
2250 if (!IS_ERR(msm_host->pclk))
2251 clk_disable_unprepare(msm_host->pclk);
2252remove_vote:
2253 if (msm_host->msm_bus_vote.client_handle)
2254 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2255out:
2256 return rc;
2257}
2258
Sahitya Tummala00240122013-02-28 19:50:51 +05302261static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2262{
2263 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2264 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2265 int rc = 0;
2266
2267 if (enable && !atomic_read(&msm_host->clks_on)) {
2268 pr_debug("%s: request to enable clocks\n",
2269 mmc_hostname(host->mmc));
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302270
Asutosh Das6c0804b2013-11-08 12:33:47 +05302271 /*
2272 * The bus-width or the clock rate might have changed
2273 * after the controller clocks were enabled; update the
2274 * bus vote in such a case.
2275 */
2276 if (atomic_read(&msm_host->controller_clock))
2277 sdhci_msm_bus_voting(host, 1);
2278
2279 rc = sdhci_msm_enable_controller_clock(host);
2280 if (rc)
2281 goto remove_vote;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302282
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302283 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2284 rc = clk_prepare_enable(msm_host->bus_clk);
2285 if (rc) {
2286 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2287 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das6c0804b2013-11-08 12:33:47 +05302288 goto disable_controller_clk;
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302289 }
2290 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002291 if (!IS_ERR(msm_host->ff_clk)) {
2292 rc = clk_prepare_enable(msm_host->ff_clk);
2293 if (rc) {
2294 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2295 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das6c0804b2013-11-08 12:33:47 +05302296 goto disable_bus_clk;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002297 }
2298 }
2299 if (!IS_ERR(msm_host->sleep_clk)) {
2300 rc = clk_prepare_enable(msm_host->sleep_clk);
2301 if (rc) {
2302 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2303 mmc_hostname(host->mmc), __func__, rc);
2304 goto disable_ff_clk;
2305 }
2306 }
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302307 mb();
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302308
Sahitya Tummala00240122013-02-28 19:50:51 +05302309 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302310 pr_debug("%s: request to disable clocks\n",
2311 mmc_hostname(host->mmc));
2312 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2313 mb();
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002314 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2315 clk_disable_unprepare(msm_host->sleep_clk);
2316 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2317 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302318 clk_disable_unprepare(msm_host->clk);
2319 if (!IS_ERR(msm_host->pclk))
2320 clk_disable_unprepare(msm_host->pclk);
2321 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2322 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302323
Asutosh Das6c0804b2013-11-08 12:33:47 +05302324 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302325 sdhci_msm_bus_voting(host, 0);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302326 }
Sahitya Tummala00240122013-02-28 19:50:51 +05302327 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302328 goto out;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002329disable_ff_clk:
2330 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2331 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302332disable_bus_clk:
2333 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2334 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das6c0804b2013-11-08 12:33:47 +05302335disable_controller_clk:
2336 if (!IS_ERR_OR_NULL(msm_host->clk))
2337 clk_disable_unprepare(msm_host->clk);
2338 if (!IS_ERR_OR_NULL(msm_host->pclk))
2339 clk_disable_unprepare(msm_host->pclk);
2340 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302341remove_vote:
2342 if (msm_host->msm_bus_vote.client_handle)
2343 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302344out:
Sahitya Tummala00240122013-02-28 19:50:51 +05302345 return rc;
2346}
2347
2348static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
2349{
2350 int rc;
2351 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2352 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2353 struct mmc_ios curr_ios = host->mmc->ios;
2354 u32 sup_clock, ddr_clock;
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302355 bool curr_pwrsave;
Sahitya Tummala00240122013-02-28 19:50:51 +05302356
2357 if (!clock) {
2358 sdhci_msm_prepare_clocks(host, false);
2359 host->clock = clock;
2360 return;
2361 }
2362
2363 rc = sdhci_msm_prepare_clocks(host, true);
2364 if (rc)
2365 return;
2366
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302367 curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2368 CORE_CLK_PWRSAVE);
Sahitya Tummala2c4bd642013-08-29 16:21:08 +05302369 if ((clock > 400000) &&
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302370 !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
2371 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2372 | CORE_CLK_PWRSAVE,
2373 host->ioaddr + CORE_VENDOR_SPEC);
2374 /*
2375 * Disable pwrsave for a newly added card if it doesn't allow
2376 * clock gating.
2377 */
2378 else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
2379 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2380 & ~CORE_CLK_PWRSAVE,
2381 host->ioaddr + CORE_VENDOR_SPEC);
2382
Sahitya Tummala00240122013-02-28 19:50:51 +05302383 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002384 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
2385 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala00240122013-02-28 19:50:51 +05302386 /*
2387 * The SDHC requires internal clock frequency to be double the
2388 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002389 * uses the faster clock (100/400MHz) for some of its parts and
2390 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala00240122013-02-28 19:50:51 +05302391 */
2392 ddr_clock = clock * 2;
2393 sup_clock = sdhci_msm_get_sup_clk_rate(host,
2394 ddr_clock);
2395 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002396
2397 /*
2398 * In general all timing modes are controlled via UHS mode select in
2399 * Host Control2 register. The eMMC-specific HS200/HS400 modes don't
2400 * have their own values defined here, hence we use the values below.
2401 *
2402 * HS200 - SDR104 (Since they both are equivalent in functionality)
2403 * HS400 - This involves multiple configurations
2404 * Initially SDR104 - when tuning is required as HS200
2405 * Then when switching to DDR @ 400MHz (HS400) we use
2406 * the vendor specific HC_SELECT_IN to control the mode.
2407 *
2408 * In addition to controlling the modes we also need to select the
2409 * correct input clock for DLL depending on the mode.
2410 *
2411 * HS400 - divided clock (free running MCLK/2)
2412 * All other modes - default (free running MCLK)
2413 */
2414 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
2415 /* Select the divided clock (free running MCLK/2) */
2416 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2417 & ~CORE_HC_MCLK_SEL_MASK)
2418 | CORE_HC_MCLK_SEL_HS400),
2419 host->ioaddr + CORE_VENDOR_SPEC);
2420 /*
2421 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
2422 * register
2423 */
2424 if (msm_host->tuning_done && !msm_host->calibration_done) {
2425 /*
2426 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
2427 * field in VENDOR_SPEC_FUNC
2428 */
2429 writel_relaxed((readl_relaxed(host->ioaddr + \
2430 CORE_VENDOR_SPEC)
2431 | CORE_HC_SELECT_IN_HS400
2432 | CORE_HC_SELECT_IN_EN),
2433 host->ioaddr + CORE_VENDOR_SPEC);
2434 }
2435 } else {
2436 /* Select the default clock (free running MCLK) */
2437 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2438 & ~CORE_HC_MCLK_SEL_MASK)
2439 | CORE_HC_MCLK_SEL_DFLT),
2440 host->ioaddr + CORE_VENDOR_SPEC);
2441
2442 /*
2443 * Disable HC_SELECT_IN to be able to use the UHS mode select
2444 * configuration from Host Control2 register for all other
2445 * modes.
2446 *
2447 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
2448 * in VENDOR_SPEC_FUNC
2449 */
2450 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2451 & ~CORE_HC_SELECT_IN_EN
2452 & ~CORE_HC_SELECT_IN_MASK),
2453 host->ioaddr + CORE_VENDOR_SPEC);
2454 }
2455 mb();
2456
Sahitya Tummala00240122013-02-28 19:50:51 +05302457 if (sup_clock != msm_host->clk_rate) {
2458 pr_debug("%s: %s: setting clk rate to %u\n",
2459 mmc_hostname(host->mmc), __func__, sup_clock);
2460 rc = clk_set_rate(msm_host->clk, sup_clock);
2461 if (rc) {
2462 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
2463 mmc_hostname(host->mmc), __func__,
2464 sup_clock, rc);
2465 return;
2466 }
2467 msm_host->clk_rate = sup_clock;
2468 host->clock = clock;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302469 /*
2470 * Update the bus vote in case of frequency change due to
2471 * clock scaling.
2472 */
2473 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala00240122013-02-28 19:50:51 +05302474 }
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302475}
2476
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302477static int sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
2478 unsigned int uhs)
2479{
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002480 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2481 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302482 u16 ctrl_2;
2483
2484 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2485 /* Select Bus Speed Mode for host */
2486 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002487 if (uhs == MMC_TIMING_MMC_HS400)
2488 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2489 else if (uhs == MMC_TIMING_MMC_HS200)
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302490 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2491 else if (uhs == MMC_TIMING_UHS_SDR12)
2492 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2493 else if (uhs == MMC_TIMING_UHS_SDR25)
2494 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2495 else if (uhs == MMC_TIMING_UHS_SDR50)
2496 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2497 else if (uhs == MMC_TIMING_UHS_SDR104)
2498 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2499 else if (uhs == MMC_TIMING_UHS_DDR50)
2500 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala00240122013-02-28 19:50:51 +05302501 /*
2502 * When the clock frequency is less than 100MHz, the feedback clock must be
2503 * provided and DLL must not be used so that tuning can be skipped. To
2504 * provide feedback clock, the mode selection can be any value less
2505 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
2506 */
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002507 if (host->clock <= CORE_FREQ_100MHZ) {
2508 if ((uhs == MMC_TIMING_MMC_HS400) ||
2509 (uhs == MMC_TIMING_MMC_HS200) ||
2510 (uhs == MMC_TIMING_UHS_SDR104))
2511 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala00240122013-02-28 19:50:51 +05302512
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002513 /*
2514 * Make sure DLL is disabled when not required
2515 *
2516 * Write 1 to DLL_RST bit of DLL_CONFIG register
2517 */
2518 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2519 | CORE_DLL_RST),
2520 host->ioaddr + CORE_DLL_CONFIG);
2521
2522 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
2523 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2524 | CORE_DLL_PDN),
2525 host->ioaddr + CORE_DLL_CONFIG);
2526 mb();
2527
2528 /*
2529 * The DLL needs to be restored and CDCLP533 recalibrated
2530 * when the clock frequency is set back to 400MHz.
2531 */
2532 msm_host->calibration_done = false;
2533 }
2534
2535 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
2536 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302537 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2538
2539 return 0;
2540}
2541
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002542/*
2543 * sdhci_msm_disable_data_xfer - disable undergoing AHB bus data transfer
2544 *
2545 * Write 0 to bit 0 in MCI_DATA_CTL (offset 0x2C) - clearing TxActive bit by
2546 * access to legacy registers. It will stop the current burst and prevent the
2547 * start of the next one.
2548 *
2549 * Polling bits 13:12 of CORE_SDCC_DEBUG_REG (offset 0x124) until they are 0,
2550 * within the CORE_AHB_DATA_DELAY_US timeout, validates that the AHB burst has
2551 * completed and a new one did not start.
2552 *
2553 * Then wait for 4us while the AHB finishes its descriptor fetch.
2554 */
2555static void sdhci_msm_disable_data_xfer(struct sdhci_host *host)
2556{
2557 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2558 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2559 u32 value;
2560 int ret;
Venkat Gopalakrishnan0a179c82013-06-26 17:56:11 -07002561 u32 version;
2562
2563 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2564 /* Core version 3.1.0 doesn't need this workaround */
2565 if (version == CORE_VERSION_310)
2566 return;
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002567
2568 value = readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CTRL);
2569 value &= ~(u32)CORE_MCI_DPSM_ENABLE;
2570 writel_relaxed(value, msm_host->core_mem + CORE_MCI_DATA_CTRL);
2571
2572 /* Enable the test bus for device slot */
2573 writel_relaxed(CORE_TESTBUS_ENA | CORE_TESTBUS_SEL2,
2574 msm_host->core_mem + CORE_TESTBUS_CONFIG);
2575
2576 ret = readl_poll_timeout_noirq(msm_host->core_mem
2577 + CORE_SDCC_DEBUG_REG, value,
2578 !(value & CORE_DEBUG_REG_AHB_HTRANS),
2579 CORE_AHB_DATA_DELAY_US, 1);
2580 if (ret) {
2581 pr_err("%s: %s: can't stop ongoing AHB bus access by ADMA\n",
2582 mmc_hostname(host->mmc), __func__);
2583 BUG();
2584 }
2585 /* Disable the test bus for device slot */
2586 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
2587 value &= ~CORE_TESTBUS_ENA;
2588 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
2589
2590 udelay(CORE_AHB_DESC_DELAY_US);
2591}
2592
Asutosh Das33a4ff52012-12-18 16:14:02 +05302593static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302594 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302595 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002596 .execute_tuning = sdhci_msm_execute_tuning,
2597 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das3781bd82013-01-10 21:11:04 +05302598 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302599 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala00240122013-02-28 19:50:51 +05302600 .get_min_clock = sdhci_msm_get_min_clock,
2601 .get_max_clock = sdhci_msm_get_max_clock,
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002602 .disable_data_xfer = sdhci_msm_disable_data_xfer,
Asutosh Das6c0804b2013-11-08 12:33:47 +05302603 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302604};
2605
2606static int __devinit sdhci_msm_probe(struct platform_device *pdev)
2607{
2608 struct sdhci_host *host;
2609 struct sdhci_pltfm_host *pltfm_host;
2610 struct sdhci_msm_host *msm_host;
2611 struct resource *core_memres = NULL;
Asutosh Dasbbc84782013-02-11 15:31:35 +05302612 int ret = 0, dead = 0;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302613 u32 vdd_max_current;
Stephen Boyd3edbd8f2013-04-24 14:19:46 -07002614 u16 host_version;
Subhash Jadavanic08d2062013-05-14 17:46:43 +05302615 u32 pwr, irq_status, irq_ctl;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302616
2617 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
2618 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
2619 GFP_KERNEL);
2620 if (!msm_host) {
2621 ret = -ENOMEM;
2622 goto out;
2623 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302624
2625 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
2626 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata);
2627 if (IS_ERR(host)) {
2628 ret = PTR_ERR(host);
2629 goto out;
2630 }
2631
2632 pltfm_host = sdhci_priv(host);
2633 pltfm_host->priv = msm_host;
2634 msm_host->mmc = host->mmc;
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302635 msm_host->pdev = pdev;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302636
2637 /* Extract platform data */
2638 if (pdev->dev.of_node) {
Venkat Gopalakrishnanc61ab7e2013-03-11 12:17:57 -07002639 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
2640 if (ret < 0) {
2641 dev_err(&pdev->dev, "Failed to get slot index %d\n",
2642 ret);
2643 goto pltfm_free;
2644 }
2645 if (disable_slots & (1 << (ret - 1))) {
2646 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
2647 ret);
2648 ret = -ENODEV;
2649 goto pltfm_free;
2650 }
2651
Asutosh Das33a4ff52012-12-18 16:14:02 +05302652 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev);
2653 if (!msm_host->pdata) {
2654 dev_err(&pdev->dev, "DT parsing error\n");
			ret = -EINVAL;
2655 goto pltfm_free;
2656 }
2657 } else {
2658 dev_err(&pdev->dev, "No device tree node\n");
		ret = -ENODEV;
2659 goto pltfm_free;
2660 }
2661
2662 /* Setup Clocks */
2663
2664 /* Setup SDCC bus voter clock. */
2665 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
2666 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2667 /* Vote for max. clk rate for max. performance */
2668 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
2669 if (ret)
2670 goto pltfm_free;
2671 ret = clk_prepare_enable(msm_host->bus_clk);
2672 if (ret)
2673 goto pltfm_free;
2674 }
2675
2676 /* Setup main peripheral bus clock */
2677 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
2678 if (!IS_ERR(msm_host->pclk)) {
2679 ret = clk_prepare_enable(msm_host->pclk);
2680 if (ret)
2681 goto bus_clk_disable;
2682 }
Asutosh Das6c0804b2013-11-08 12:33:47 +05302683 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302684
2685 /* Setup SDC MMC clock */
2686 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
2687 if (IS_ERR(msm_host->clk)) {
2688 ret = PTR_ERR(msm_host->clk);
2689 goto pclk_disable;
2690 }
2691
Sahitya Tummala00240122013-02-28 19:50:51 +05302692 /* Set to the minimum supported clock frequency */
2693 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
2694 if (ret) {
2695 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummalac954ab02013-06-07 13:03:07 +05302696 goto pclk_disable;
Sahitya Tummala00240122013-02-28 19:50:51 +05302697 }
Sahitya Tummalac954ab02013-06-07 13:03:07 +05302698 ret = clk_prepare_enable(msm_host->clk);
2699 if (ret)
2700 goto pclk_disable;
2701
Sahitya Tummala00240122013-02-28 19:50:51 +05302702 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302703 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala00240122013-02-28 19:50:51 +05302704
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002705 /* Setup CDC calibration fixed feedback clock */
2706 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
2707 if (!IS_ERR(msm_host->ff_clk)) {
2708 ret = clk_prepare_enable(msm_host->ff_clk);
2709 if (ret)
2710 goto clk_disable;
2711 }
2712
2713 /* Setup CDC calibration sleep clock */
2714 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
2715 if (!IS_ERR(msm_host->sleep_clk)) {
2716 ret = clk_prepare_enable(msm_host->sleep_clk);
2717 if (ret)
2718 goto ff_clk_disable;
2719 }
2720
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -07002721 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
2722
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302723 ret = sdhci_msm_bus_register(msm_host, pdev);
2724 if (ret)
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002725 goto sleep_clk_disable;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302726
2727 if (msm_host->msm_bus_vote.client_handle)
2728 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
2729 sdhci_msm_bus_work);
2730 sdhci_msm_bus_voting(host, 1);
2731
Asutosh Das33a4ff52012-12-18 16:14:02 +05302732 /* Setup regulators */
2733 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
2734 if (ret) {
2735 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302736 goto bus_unregister;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302737 }
2738
2739 /* Reset the core and Enable SDHC mode */
2740 core_memres = platform_get_resource_byname(pdev,
2741 IORESOURCE_MEM, "core_mem");
2742 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
2743 resource_size(core_memres));
2744
2745 if (!msm_host->core_mem) {
2746 dev_err(&pdev->dev, "Failed to remap registers\n");
2747 ret = -ENOMEM;
2748 goto vreg_deinit;
2749 }
2750
Stepan Moskovchenkoe0938982013-09-13 22:19:33 -07002751 /* Unset HC_MODE_EN bit in HC_MODE register */
2752 writel_relaxed(0, (msm_host->core_mem + CORE_HC_MODE));
2753
Asutosh Das33a4ff52012-12-18 16:14:02 +05302754 /* Set SW_RST bit in POWER register (Offset 0x0) */
Sahitya Tummalad5d76e72013-04-25 11:50:56 +05302755 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
2756 CORE_SW_RST, msm_host->core_mem + CORE_POWER);
2757 /*
2758 * SW reset can take up to 10 HCLK + 15 MCLK cycles.
2759 * Calculated with the minimum clock rates (hclk = 27MHz,
2760 * mclk = 400KHz) this comes to ~40us. Let's poll for
2761 * max. 1ms for reset completion.
2762 */
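	/*
	 * Rough breakdown of that estimate: 10 / 27MHz is ~0.4us and
	 * 15 / 400KHz is 37.5us, i.e. ~38us in total, so the 1ms poll
	 * window leaves ample margin.
	 */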
2763 ret = readl_poll_timeout(msm_host->core_mem + CORE_POWER,
2764 pwr, !(pwr & CORE_SW_RST), 100, 10);
2765
2766 if (ret) {
2767 dev_err(&pdev->dev, "reset failed (%d)\n", ret);
2768 goto vreg_deinit;
2769 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302770 /* Set HC_MODE_EN bit in HC_MODE register */
2771 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
2772
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002773 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
2774 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
2775 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
2776
Asutosh Das33a4ff52012-12-18 16:14:02 +05302777 /*
Subhash Jadavanic08d2062013-05-14 17:46:43 +05302778 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
2779 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
2780 * interrupt in GIC (by registering the interrupt handler), we need to
2781 * ensure that any pending power irq interrupt status is acknowledged
2782 * otherwise power irq interrupt handler would be fired prematurely.
2783 */
2784 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
2785 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2786 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
2787 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
2788 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
2789 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
2790 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
2791 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
2792 /*
2793 * Ensure that the above writes are propagated before interrupt enablement
2794 * in GIC.
2795 */
2796 mb();
2797
2798 /*
Asutosh Das33a4ff52012-12-18 16:14:02 +05302799 * Following are the deviations from SDHC spec v3.0 -
2800 * 1. Card detection is handled using a separate GPIO.
2801 * 2. Bus power control is handled by interacting with the PMIC.
2802 */
2803 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
2804 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala00240122013-02-28 19:50:51 +05302805 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
2806 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummalad6a74b02013-02-25 15:50:08 +05302807 host->quirks2 |= SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING;
Krishna Kondaa20d3362013-04-01 21:01:59 -07002808 host->quirks2 |= SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE;
Sahitya Tummalad2ae8832013-04-12 11:49:11 +05302809 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummalae6886bd2013-04-12 12:11:20 +05302810 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala4d12d0b2013-04-12 11:59:25 +05302811 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302812
Sahitya Tummalaf667cc12013-06-10 16:32:51 +05302813 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
2814 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
2815
Stephen Boyd3edbd8f2013-04-24 14:19:46 -07002816 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnane9beaa22012-09-17 16:00:15 -07002817 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
2818 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
2819 SDHCI_VENDOR_VER_SHIFT));
2820 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
2821 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
2822 /*
2823 * Add 40us delay in interrupt handler when
2824 * operating at the initialization frequency (400KHz).
2825 */
2826 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
2827 /*
2828 * Set Software Reset for DAT line in Software
2829 * Reset Register (Bit 2).
2830 */
2831 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
2832 }
2833
2834 /* Setup PWRCTL irq */
Asutosh Dasbbc84782013-02-11 15:31:35 +05302835 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
2836 if (msm_host->pwr_irq < 0) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05302837 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Asutosh Dasbbc84782013-02-11 15:31:35 +05302838 msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302839 goto vreg_deinit;
2840 }
Asutosh Dasbbc84782013-02-11 15:31:35 +05302841 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302842 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002843 dev_name(&pdev->dev), host);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302844 if (ret) {
2845 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Asutosh Dasbbc84782013-02-11 15:31:35 +05302846 msm_host->pwr_irq, ret);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302847 goto vreg_deinit;
2848 }
2849
2850 /* Enable pwr irq interrupts */
2851 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
2852
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302853 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
2854 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
2855
Asutosh Das33a4ff52012-12-18 16:14:02 +05302856 /* Set host capabilities */
2857 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
2858 msm_host->mmc->caps |= msm_host->pdata->caps;
2859
2860 vdd_max_current = sdhci_msm_get_vreg_vdd_max_current(msm_host);
2861 if (vdd_max_current >= 800)
2862 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2863 else if (vdd_max_current >= 600)
2864 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2865 else if (vdd_max_current >= 400)
2866 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2867 else
2868 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2869
2870 if (vdd_max_current > 150)
2871 msm_host->mmc->caps |= MMC_CAP_SET_XPC_180 |
2872 MMC_CAP_SET_XPC_300|
2873 MMC_CAP_SET_XPC_330;
2874
2875 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Asutosh Dasbbc84782013-02-11 15:31:35 +05302876 msm_host->mmc->caps2 |= MMC_CAP2_CORE_RUNTIME_PM;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302877 msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR;
2878 msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
2879 msm_host->mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC |
2880 MMC_CAP2_DETECT_ON_ERR);
2881 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
2882 msm_host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302883 msm_host->mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
Sahitya Tummala00240122013-02-28 19:50:51 +05302884 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Konstantin Dorfmanfa436d52013-04-17 16:26:11 +03002885 msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
Subhash Jadavani61a52c92013-05-29 15:52:10 +05302886 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Asutosh Das41c35212013-09-19 11:14:27 +05302887 msm_host->mmc->caps2 |= MMC_CAP2_CORE_PM;
Asutosh Das4dc60412013-06-24 18:20:45 +05302888 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302889
2890 if (msm_host->pdata->nonremovable)
2891 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
2892
Sahitya Tummalab4e84042013-03-10 07:03:17 +05302893 host->cpu_dma_latency_us = msm_host->pdata->cpu_dma_latency_us;
2894
Sahitya Tummala179e7382013-03-20 19:24:01 +05302895 init_completion(&msm_host->pwr_irq_completion);
2896
Sahitya Tummala62448d92013-03-12 14:57:46 +05302897 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
2898 ret = mmc_cd_gpio_request(msm_host->mmc,
2899 msm_host->pdata->status_gpio);
2900 if (ret) {
2901 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
2902 __func__, ret);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302903 goto vreg_deinit;
Sahitya Tummala62448d92013-03-12 14:57:46 +05302904 }
2905 }
2906
Sahitya Tummala2fa7eb12013-03-20 19:34:59 +05302907 if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
2908 host->dma_mask = DMA_BIT_MASK(32);
2909 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
2910 } else {
2911 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
2912 }
2913
Asutosh Das33a4ff52012-12-18 16:14:02 +05302914 ret = sdhci_add_host(host);
2915 if (ret) {
2916 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302917 goto free_cd_gpio;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302918 }
2919
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302920 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
2921 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
2922 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
2923 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
2924 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
2925 ret = device_create_file(&pdev->dev,
2926 &msm_host->msm_bus_vote.max_bus_bw);
2927 if (ret)
2928 goto remove_host;
2929
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302930 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
2931 msm_host->polling.show = show_polling;
2932 msm_host->polling.store = store_polling;
2933 sysfs_attr_init(&msm_host->polling.attr);
2934 msm_host->polling.attr.name = "polling";
2935 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
2936 ret = device_create_file(&pdev->dev, &msm_host->polling);
2937 if (ret)
2938 goto remove_max_bus_bw_file;
2939 }
Asutosh Dasbbc84782013-02-11 15:31:35 +05302940 ret = pm_runtime_set_active(&pdev->dev);
2941 if (ret)
2942 pr_err("%s: %s: pm_runtime_set_active failed: err: %d\n",
2943 mmc_hostname(host->mmc), __func__, ret);
Asutosh Das41c35212013-09-19 11:14:27 +05302944 else if (mmc_use_core_runtime_pm(host->mmc))
Asutosh Dasbbc84782013-02-11 15:31:35 +05302945 pm_runtime_enable(&pdev->dev);
2946
Asutosh Das33a4ff52012-12-18 16:14:02 +05302947 /* Successful initialization */
2948 goto out;
2949
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302950remove_max_bus_bw_file:
2951 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302952remove_host:
2953 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
2954 sdhci_remove_host(host, dead);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302955free_cd_gpio:
2956 if (gpio_is_valid(msm_host->pdata->status_gpio))
2957 mmc_cd_gpio_free(msm_host->mmc);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302958vreg_deinit:
2959 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302960bus_unregister:
2961 if (msm_host->msm_bus_vote.client_handle)
2962 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2963 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002964sleep_clk_disable:
2965 if (!IS_ERR(msm_host->sleep_clk))
2966 clk_disable_unprepare(msm_host->sleep_clk);
2967ff_clk_disable:
2968 if (!IS_ERR(msm_host->ff_clk))
2969 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302970clk_disable:
2971 if (!IS_ERR(msm_host->clk))
2972 clk_disable_unprepare(msm_host->clk);
2973pclk_disable:
2974 if (!IS_ERR(msm_host->pclk))
2975 clk_disable_unprepare(msm_host->pclk);
2976bus_clk_disable:
2977 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2978 clk_disable_unprepare(msm_host->bus_clk);
2979pltfm_free:
2980 sdhci_pltfm_free(pdev);
2981out:
2982 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
2983 return ret;
2984}
2985
2986static int __devexit sdhci_msm_remove(struct platform_device *pdev)
2987{
2988 struct sdhci_host *host = platform_get_drvdata(pdev);
2989 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2990 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2991 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
2992 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
2993 0xffffffff);
2994
2995 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302996 if (!gpio_is_valid(msm_host->pdata->status_gpio))
2997 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302998 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302999 sdhci_remove_host(host, dead);
Asutosh Dasbbc84782013-02-11 15:31:35 +05303000 pm_runtime_disable(&pdev->dev);
Asutosh Das33a4ff52012-12-18 16:14:02 +05303001 sdhci_pltfm_free(pdev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303002
3003 if (gpio_is_valid(msm_host->pdata->status_gpio))
3004 mmc_cd_gpio_free(msm_host->mmc);
3005
Asutosh Das33a4ff52012-12-18 16:14:02 +05303006 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05303007
Asutosh Das33a4ff52012-12-18 16:14:02 +05303008 if (pdata->pin_data)
Asutosh Das390519d2012-12-21 12:21:42 +05303009 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05303010
3011 if (msm_host->msm_bus_vote.client_handle) {
3012 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3013 sdhci_msm_bus_unregister(msm_host);
3014 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05303015 return 0;
3016}
3017
Asutosh Dasbbc84782013-02-11 15:31:35 +05303018static int sdhci_msm_runtime_suspend(struct device *dev)
3019{
3020 struct sdhci_host *host = dev_get_drvdata(dev);
3021 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3022 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3023
3024 disable_irq(host->irq);
3025 disable_irq(msm_host->pwr_irq);
3026
Sahitya Tummalac45ae732013-05-23 15:59:22 +05303027 /*
3028 * Remove the vote immediately only if the clocks are off, in which
3029 * case we might have queued work to remove the vote, but it may not
3030 * complete before runtime suspend or system suspend.
3031 */
3032 if (!atomic_read(&msm_host->clks_on)) {
3033 if (msm_host->msm_bus_vote.client_handle)
3034 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3035 }
3036
Asutosh Dasbbc84782013-02-11 15:31:35 +05303037 return 0;
3038}
3039
3040static int sdhci_msm_runtime_resume(struct device *dev)
3041{
3042 struct sdhci_host *host = dev_get_drvdata(dev);
3043 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3044 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3045
3046 enable_irq(msm_host->pwr_irq);
3047 enable_irq(host->irq);
3048
3049 return 0;
3050}
3051
3052#ifdef CONFIG_PM_SLEEP
3053
3054static int sdhci_msm_suspend(struct device *dev)
3055{
3056 struct sdhci_host *host = dev_get_drvdata(dev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303057 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3058 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Dasbbc84782013-02-11 15:31:35 +05303059 int ret = 0;
3060
Sahitya Tummala62448d92013-03-12 14:57:46 +05303061 if (gpio_is_valid(msm_host->pdata->status_gpio))
3062 mmc_cd_gpio_free(msm_host->mmc);
3063
Asutosh Dasbbc84782013-02-11 15:31:35 +05303064 if (pm_runtime_suspended(dev)) {
3065 pr_debug("%s: %s: already runtime suspended\n",
3066 mmc_hostname(host->mmc), __func__);
3067 goto out;
3068 }
3069
3070 return sdhci_msm_runtime_suspend(dev);
3071out:
3072 return ret;
3073}
3074
3075static int sdhci_msm_resume(struct device *dev)
3076{
3077 struct sdhci_host *host = dev_get_drvdata(dev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303078 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3079 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Dasbbc84782013-02-11 15:31:35 +05303080 int ret = 0;
3081
Sahitya Tummala62448d92013-03-12 14:57:46 +05303082 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
3083 ret = mmc_cd_gpio_request(msm_host->mmc,
3084 msm_host->pdata->status_gpio);
3085 if (ret)
3086 pr_err("%s: %s: Failed to request card detection IRQ %d\n",
3087 mmc_hostname(host->mmc), __func__, ret);
3088 }
3089
Asutosh Dasbbc84782013-02-11 15:31:35 +05303090 if (pm_runtime_suspended(dev)) {
3091 pr_debug("%s: %s: runtime suspended, defer system resume\n",
3092 mmc_hostname(host->mmc), __func__);
3093 goto out;
3094 }
3095
3096 return sdhci_msm_runtime_resume(dev);
3097out:
3098 return ret;
3099}
3100#endif
3101
3102#ifdef CONFIG_PM
3103static const struct dev_pm_ops sdhci_msm_pmops = {
3104 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
3105 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
3106 NULL)
3107};
3108
3109#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
3110
3111#else
3112#define SDHCI_MSM_PMOPS NULL
3113#endif
Asutosh Das33a4ff52012-12-18 16:14:02 +05303114static const struct of_device_id sdhci_msm_dt_match[] = {
3115 {.compatible = "qcom,sdhci-msm"},
Sujit Reddy Thummae5594822013-11-28 08:51:19 +05303116 { /* sentinel */ }
Asutosh Das33a4ff52012-12-18 16:14:02 +05303117};
3118MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
3119
3120static struct platform_driver sdhci_msm_driver = {
3121 .probe = sdhci_msm_probe,
3122 .remove = __devexit_p(sdhci_msm_remove),
3123 .driver = {
3124 .name = "sdhci_msm",
3125 .owner = THIS_MODULE,
3126 .of_match_table = sdhci_msm_dt_match,
Asutosh Dasbbc84782013-02-11 15:31:35 +05303127 .pm = SDHCI_MSM_PMOPS,
Asutosh Das33a4ff52012-12-18 16:14:02 +05303128 },
3129};
3130
3131module_platform_driver(sdhci_msm_driver);
3132
3133MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
3134MODULE_LICENSE("GPL v2");