Asutosh Das33a4ff52012-12-18 16:14:02 +05301/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm MSM SDHCI Platform
3 * driver source file
4 *
5 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/gfp.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/regulator/consumer.h>
26#include <linux/types.h>
27#include <linux/input.h>
28#include <linux/platform_device.h>
29#include <linux/wait.h>
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070030#include <linux/io.h>
31#include <linux/delay.h>
32#include <linux/scatterlist.h>
33#include <linux/slab.h>
34#include <linux/mmc/mmc.h>
Asutosh Dasbbc84782013-02-11 15:31:35 +053035#include <linux/pm.h>
36#include <linux/pm_runtime.h>
Sahitya Tummala62448d92013-03-12 14:57:46 +053037#include <linux/mmc/cd-gpio.h>
Sahitya Tummala2fa7eb12013-03-20 19:34:59 +053038#include <linux/dma-mapping.h>
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070039#include <mach/gpio.h>
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +053040#include <mach/msm_bus.h>
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +030041#include <linux/iopoll.h>
Asutosh Das33a4ff52012-12-18 16:14:02 +053042
43#include "sdhci-pltfm.h"
44
Venkat Gopalakrishnane9beaa22012-09-17 16:00:15 -070045#define SDHCI_VER_100 0x2B
Asutosh Das33a4ff52012-12-18 16:14:02 +053046#define CORE_HC_MODE 0x78
47#define HC_MODE_EN 0x1
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -070048#define FF_CLK_SW_RST_DIS (1 << 13)
Asutosh Das33a4ff52012-12-18 16:14:02 +053049
50#define CORE_POWER 0x0
51#define CORE_SW_RST (1 << 7)
52
53#define CORE_PWRCTL_STATUS 0xDC
54#define CORE_PWRCTL_MASK 0xE0
55#define CORE_PWRCTL_CLEAR 0xE4
56#define CORE_PWRCTL_CTL 0xE8
57
58#define CORE_PWRCTL_BUS_OFF 0x01
59#define CORE_PWRCTL_BUS_ON (1 << 1)
60#define CORE_PWRCTL_IO_LOW (1 << 2)
61#define CORE_PWRCTL_IO_HIGH (1 << 3)
62
63#define CORE_PWRCTL_BUS_SUCCESS 0x01
64#define CORE_PWRCTL_BUS_FAIL (1 << 1)
65#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
66#define CORE_PWRCTL_IO_FAIL (1 << 3)
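/*
 * Illustrative note (not from the original file): the PWRCTL registers
 * above implement a request/acknowledge handshake between the SDCC core
 * and software. A sketch of the expected flow, assuming the power IRQ
 * handler later in this driver follows the usual MSM SDCC scheme:
 *
 *	req = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
 *	writeb_relaxed(req, msm_host->core_mem + CORE_PWRCTL_CLEAR);
 *	... enable/disable the bus regulator or switch the IO pad voltage ...
 *	writeb_relaxed(ack, msm_host->core_mem + CORE_PWRCTL_CTL);
 *
 * where 'ack' carries CORE_PWRCTL_BUS_SUCCESS/CORE_PWRCTL_IO_SUCCESS (or
 * the corresponding *_FAIL bits) depending on the outcome.
 */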
67
68#define INT_MASK 0xF
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070069#define MAX_PHASES 16
70
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -070071#define CORE_DLL_CONFIG 0x100
72#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070073#define CORE_DLL_EN (1 << 16)
74#define CORE_CDR_EN (1 << 17)
75#define CORE_CK_OUT_EN (1 << 18)
76#define CORE_CDR_EXT_EN (1 << 19)
77#define CORE_DLL_PDN (1 << 29)
78#define CORE_DLL_RST (1 << 30)
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -070079
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070080#define CORE_DLL_STATUS 0x108
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -070081#define CORE_DLL_LOCK (1 << 7)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070082
83#define CORE_VENDOR_SPEC 0x10C
84#define CORE_CLK_PWRSAVE (1 << 1)
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -070085#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
86#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
87#define CORE_HC_MCLK_SEL_MASK (3 << 8)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070088#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -070089#define CORE_HC_SELECT_IN_EN (1 << 18)
90#define CORE_HC_SELECT_IN_HS400 (6 << 19)
91#define CORE_HC_SELECT_IN_MASK (7 << 19)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -070092
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -070093#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
94#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
95
96#define CORE_CSR_CDC_CTLR_CFG0 0x130
97#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
98#define CORE_HW_AUTOCAL_ENA (1 << 17)
99
100#define CORE_CSR_CDC_CTLR_CFG1 0x134
101#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
102#define CORE_TIMER_ENA (1 << 16)
103
104#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
105#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
106#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
107#define CORE_CDC_OFFSET_CFG 0x14C
108#define CORE_CSR_CDC_DELAY_CFG 0x150
109#define CORE_CDC_SLAVE_DDA_CFG 0x160
110#define CORE_CSR_CDC_STATUS0 0x164
111#define CORE_CALIBRATION_DONE (1 << 0)
112
113#define CORE_CDC_ERROR_CODE_MASK 0x7000000
114
115#define CORE_CSR_CDC_GEN_CFG 0x178
116#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
117#define CORE_CDC_SWITCH_RC_EN (1 << 1)
118
119#define CORE_DDR_200_CFG 0x184
120#define CORE_CDC_T4_DLY_SEL (1 << 0)
121#define CORE_START_CDC_TRAFFIC (1 << 6)
122
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +0300123#define CORE_MCI_DATA_CTRL 0x2C
124#define CORE_MCI_DPSM_ENABLE (1 << 0)
125
126#define CORE_TESTBUS_CONFIG 0x0CC
127#define CORE_TESTBUS_ENA (1 << 3)
128#define CORE_TESTBUS_SEL2 (1 << 4)
129
Venkat Gopalakrishnan0a179c82013-06-26 17:56:11 -0700130#define CORE_MCI_VERSION 0x050
131#define CORE_VERSION_310 0x10000011
132
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +0300133/*
134 * Waiting until end of potential AHB access for data:
135 * 16 AHB cycles (160ns for 100MHz and 320ns for 50MHz) +
136 * delay on AHB (2us) = maximum 2.32us
 137 * Taking a 10x margin: ~2.32us * 10 = ~23us
138 */
139#define CORE_AHB_DATA_DELAY_US 23
140/* Waiting until end of potential AHB access for descriptor:
141 * Single (1 AHB cycle) + delay on AHB bus = max 2us
142 * INCR4 (4 AHB cycles) + delay on AHB bus = max 2us
143 * Single (1 AHB cycle) + delay on AHB bus = max 2us
144 * Total 8 us delay with margin
145 */
146#define CORE_AHB_DESC_DELAY_US 8
147
148#define CORE_SDCC_DEBUG_REG 0x124
149#define CORE_DEBUG_REG_AHB_HTRANS (3 << 12)
150
Asutosh Das3781bd82013-01-10 21:11:04 +0530151/* 8KB descriptors */
152#define SDHCI_MSM_MAX_SEGMENTS (1 << 13)
Sahitya Tummala04c3a462013-01-11 11:30:45 +0530153#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
Asutosh Das3781bd82013-01-10 21:11:04 +0530154
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700155#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
156
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700157#define INVALID_TUNING_PHASE -1
158
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700159static const u32 tuning_block_64[] = {
160 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
161 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
162 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
163 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
164};
165
166static const u32 tuning_block_128[] = {
167 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
168 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
169 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
170 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
171 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
172 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
173 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
174 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
175};
Asutosh Das33a4ff52012-12-18 16:14:02 +0530176
Venkat Gopalakrishnanc61ab7e2013-03-11 12:17:57 -0700177static int disable_slots;
178/* root can write, others read */
179module_param(disable_slots, int, S_IRUGO|S_IWUSR);
180
Asutosh Das33a4ff52012-12-18 16:14:02 +0530181/* This structure keeps information per regulator */
182struct sdhci_msm_reg_data {
183 /* voltage regulator handle */
184 struct regulator *reg;
185 /* regulator name */
186 const char *name;
187 /* voltage level to be set */
188 u32 low_vol_level;
189 u32 high_vol_level;
190 /* Load values for low power and high power mode */
191 u32 lpm_uA;
192 u32 hpm_uA;
193
194 /* is this regulator enabled? */
195 bool is_enabled;
 196 /* does this regulator need to be always on? */
197 bool is_always_on;
198 /* is low power mode setting required for this regulator? */
199 bool lpm_sup;
Asutosh Das95afcad2013-06-28 15:03:44 +0530200 bool set_voltage_sup;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530201};
202
203/*
204 * This structure keeps information for all the
 205 * regulators required for an SDCC slot.
206 */
207struct sdhci_msm_slot_reg_data {
208 /* keeps VDD/VCC regulator info */
209 struct sdhci_msm_reg_data *vdd_data;
210 /* keeps VDD IO regulator info */
211 struct sdhci_msm_reg_data *vdd_io_data;
212};
213
214struct sdhci_msm_gpio {
215 u32 no;
216 const char *name;
217 bool is_enabled;
218};
219
220struct sdhci_msm_gpio_data {
221 struct sdhci_msm_gpio *gpio;
222 u8 size;
223};
224
Asutosh Das390519d2012-12-21 12:21:42 +0530225struct sdhci_msm_pad_pull {
226 enum msm_tlmm_pull_tgt no;
227 u32 val;
228};
229
230struct sdhci_msm_pad_pull_data {
231 struct sdhci_msm_pad_pull *on;
232 struct sdhci_msm_pad_pull *off;
233 u8 size;
234};
235
236struct sdhci_msm_pad_drv {
237 enum msm_tlmm_hdrive_tgt no;
238 u32 val;
239};
240
241struct sdhci_msm_pad_drv_data {
242 struct sdhci_msm_pad_drv *on;
243 struct sdhci_msm_pad_drv *off;
244 u8 size;
245};
246
247struct sdhci_msm_pad_data {
248 struct sdhci_msm_pad_pull_data *pull;
249 struct sdhci_msm_pad_drv_data *drv;
250};
251
252
Asutosh Das33a4ff52012-12-18 16:14:02 +0530253struct sdhci_msm_pin_data {
254 /*
255 * = 1 if controller pins are using gpios
256 * = 0 if controller has dedicated MSM pads
257 */
Asutosh Das390519d2012-12-21 12:21:42 +0530258 u8 is_gpio;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530259 bool cfg_sts;
260 struct sdhci_msm_gpio_data *gpio_data;
Asutosh Das390519d2012-12-21 12:21:42 +0530261 struct sdhci_msm_pad_data *pad_data;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530262};
263
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +0530264struct sdhci_msm_bus_voting_data {
265 struct msm_bus_scale_pdata *bus_pdata;
266 unsigned int *bw_vecs;
267 unsigned int bw_vecs_size;
268};
269
Asutosh Das33a4ff52012-12-18 16:14:02 +0530270struct sdhci_msm_pltfm_data {
271 /* Supported UHS-I Modes */
272 u32 caps;
273
274 /* More capabilities */
275 u32 caps2;
276
277 unsigned long mmc_bus_width;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530278 struct sdhci_msm_slot_reg_data *vreg_data;
279 bool nonremovable;
280 struct sdhci_msm_pin_data *pin_data;
Sahitya Tummalab4e84042013-03-10 07:03:17 +0530281 u32 cpu_dma_latency_us;
Sahitya Tummala62448d92013-03-12 14:57:46 +0530282 int status_gpio; /* card detection GPIO that is configured as IRQ */
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +0530283 struct sdhci_msm_bus_voting_data *voting_data;
Sahitya Tummala00240122013-02-28 19:50:51 +0530284 u32 *sup_clk_table;
285 unsigned char sup_clk_cnt;
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +0530286};
287
288struct sdhci_msm_bus_vote {
289 uint32_t client_handle;
290 uint32_t curr_vote;
291 int min_bw_vote;
292 int max_bw_vote;
293 bool is_max_bw_needed;
294 struct delayed_work vote_work;
295 struct device_attribute max_bus_bw;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530296};
297
298struct sdhci_msm_host {
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +0530299 struct platform_device *pdev;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530300 void __iomem *core_mem; /* MSM SDCC mapped address */
Asutosh Dasbbc84782013-02-11 15:31:35 +0530301 int pwr_irq; /* power irq */
Asutosh Das33a4ff52012-12-18 16:14:02 +0530302 struct clk *clk; /* main SD/MMC bus clock */
303 struct clk *pclk; /* SDHC peripheral bus clock */
304 struct clk *bus_clk; /* SDHC bus voter clock */
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700305 struct clk *ff_clk; /* CDC calibration fixed feedback clock */
306 struct clk *sleep_clk; /* CDC calibration sleep clock */
Sahitya Tummala04c3a462013-01-11 11:30:45 +0530307 atomic_t clks_on; /* Set if clocks are enabled */
Asutosh Das33a4ff52012-12-18 16:14:02 +0530308 struct sdhci_msm_pltfm_data *pdata;
309 struct mmc_host *mmc;
310 struct sdhci_pltfm_data sdhci_msm_pdata;
Sahitya Tummala179e7382013-03-20 19:24:01 +0530311 u32 curr_pwr_state;
312 u32 curr_io_level;
313 struct completion pwr_irq_completion;
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +0530314 struct sdhci_msm_bus_vote msm_bus_vote;
Sahitya Tummala3b292c32013-06-20 14:00:18 +0530315 struct device_attribute polling;
Sahitya Tummala00240122013-02-28 19:50:51 +0530316 u32 clk_rate; /* Keeps track of current clock rate that is set */
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700317 bool tuning_done;
318 bool calibration_done;
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700319 u8 saved_tuning_phase;
Asutosh Das33a4ff52012-12-18 16:14:02 +0530320};
321
322enum vdd_io_level {
323 /* set vdd_io_data->low_vol_level */
324 VDD_IO_LOW,
325 /* set vdd_io_data->high_vol_level */
326 VDD_IO_HIGH,
327 /*
 328 * set whatever value is passed in voltage_level (the third
 329 * argument of sdhci_msm_set_vdd_io_vol()).
330 */
331 VDD_IO_SET_LEVEL,
332};
333
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700334/* MSM platform specific tuning */
335static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
336 u8 poll)
337{
338 int rc = 0;
339 u32 wait_cnt = 50;
340 u8 ck_out_en = 0;
341 struct mmc_host *mmc = host->mmc;
342
343 /* poll for CK_OUT_EN bit. max. poll time = 50us */
344 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
345 CORE_CK_OUT_EN);
346
347 while (ck_out_en != poll) {
348 if (--wait_cnt == 0) {
349 pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
350 mmc_hostname(mmc), __func__, poll);
351 rc = -ETIMEDOUT;
352 goto out;
353 }
354 udelay(1);
355
356 ck_out_en = !!(readl_relaxed(host->ioaddr +
357 CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
358 }
359out:
360 return rc;
361}
362
363static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
364{
365 int rc = 0;
366 u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
367 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
368 0x8};
369 unsigned long flags;
370 u32 config;
371 struct mmc_host *mmc = host->mmc;
372
373 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
374 spin_lock_irqsave(&host->lock, flags);
375
376 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
377 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
378 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
379 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
380
381 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
382 rc = msm_dll_poll_ck_out_en(host, 0);
383 if (rc)
384 goto err_out;
385
386 /*
387 * Write the selected DLL clock output phase (0 ... 15)
388 * to CDR_SELEXT bit field of DLL_CONFIG register.
389 */
390 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
391 & ~(0xF << 20))
392 | (grey_coded_phase_table[phase] << 20)),
393 host->ioaddr + CORE_DLL_CONFIG);
394
395 /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
396 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
397 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
398
399 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
400 rc = msm_dll_poll_ck_out_en(host, 1);
401 if (rc)
402 goto err_out;
403
404 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
405 config |= CORE_CDR_EN;
406 config &= ~CORE_CDR_EXT_EN;
407 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
408 goto out;
409
410err_out:
411 pr_err("%s: %s: Failed to set DLL phase: %d\n",
412 mmc_hostname(mmc), __func__, phase);
413out:
414 spin_unlock_irqrestore(&host->lock, flags);
415 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
416 return rc;
417}
418
419/*
 420 * Find out the greatest range of consecutive selected
421 * DLL clock output phases that can be used as sampling
422 * setting for SD3.0 UHS-I card read operation (in SDR104
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700423 * timing mode) or for eMMC4.5 card read operation (in
424 * HS400/HS200 timing mode).
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700425 * Select the phase 3/4 of the way into that range and configure
 426 * the DLL with the selected DLL clock output phase.
427 */
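/*
 * Worked example (hypothetical tuning result, not measured data): if
 * phases 6..12 all pass, the largest window has curr_max = 7 entries;
 * (7 * 3) / 4 = 5, decremented to index 4, so ranges[row][4] = phase 10
 * is the phase finally programmed into the DLL.
 */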
428
429static int msm_find_most_appropriate_phase(struct sdhci_host *host,
430 u8 *phase_table, u8 total_phases)
431{
432 int ret;
433 u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
434 u8 phases_per_row[MAX_PHASES] = {0};
435 int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
436 int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
437 bool phase_0_found = false, phase_15_found = false;
438 struct mmc_host *mmc = host->mmc;
439
440 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
441 if (!total_phases || (total_phases > MAX_PHASES)) {
442 pr_err("%s: %s: invalid argument: total_phases=%d\n",
443 mmc_hostname(mmc), __func__, total_phases);
444 return -EINVAL;
445 }
446
447 for (cnt = 0; cnt < total_phases; cnt++) {
448 ranges[row_index][col_index] = phase_table[cnt];
449 phases_per_row[row_index] += 1;
450 col_index++;
451
452 if ((cnt + 1) == total_phases) {
453 continue;
454 /* check if next phase in phase_table is consecutive or not */
455 } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
456 row_index++;
457 col_index = 0;
458 }
459 }
460
461 if (row_index >= MAX_PHASES)
462 return -EINVAL;
463
 464 /* Check if phase-0 is present in the first valid window */
465 if (!ranges[0][0]) {
466 phase_0_found = true;
467 phase_0_raw_index = 0;
 468 /* Check if a cycle exists between the 2 valid windows */
469 for (cnt = 1; cnt <= row_index; cnt++) {
470 if (phases_per_row[cnt]) {
471 for (i = 0; i < phases_per_row[cnt]; i++) {
472 if (ranges[cnt][i] == 15) {
473 phase_15_found = true;
474 phase_15_raw_index = cnt;
475 break;
476 }
477 }
478 }
479 }
480 }
481
 482 /* If 2 valid windows form a cycle then merge them into a single window */
483 if (phase_0_found && phase_15_found) {
 484 /* number of phases in the row where phase 0 is present */
485 u8 phases_0 = phases_per_row[phase_0_raw_index];
 486 /* number of phases in the row where phase 15 is present */
487 u8 phases_15 = phases_per_row[phase_15_raw_index];
488
489 if (phases_0 + phases_15 >= MAX_PHASES)
490 /*
 491 * If there is more than one phase window then the total
 492 * number of phases in both windows must be less than
 493 * MAX_PHASES.
494 */
495 return -EINVAL;
496
497 /* Merge 2 cyclic windows */
498 i = phases_15;
499 for (cnt = 0; cnt < phases_0; cnt++) {
500 ranges[phase_15_raw_index][i] =
501 ranges[phase_0_raw_index][cnt];
502 if (++i >= MAX_PHASES)
503 break;
504 }
505
506 phases_per_row[phase_0_raw_index] = 0;
507 phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
508 }
509
510 for (cnt = 0; cnt <= row_index; cnt++) {
511 if (phases_per_row[cnt] > curr_max) {
512 curr_max = phases_per_row[cnt];
513 selected_row_index = cnt;
514 }
515 }
516
517 i = ((curr_max * 3) / 4);
518 if (i)
519 i--;
520
521 ret = (int)ranges[selected_row_index][i];
522
523 if (ret >= MAX_PHASES) {
524 ret = -EINVAL;
525 pr_err("%s: %s: invalid phase selected=%d\n",
526 mmc_hostname(mmc), __func__, ret);
527 }
528
529 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
530 return ret;
531}
532
533static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
534{
535 u32 mclk_freq = 0;
536
537 /* Program the MCLK value to MCLK_FREQ bit field */
538 if (host->clock <= 112000000)
539 mclk_freq = 0;
540 else if (host->clock <= 125000000)
541 mclk_freq = 1;
542 else if (host->clock <= 137000000)
543 mclk_freq = 2;
544 else if (host->clock <= 150000000)
545 mclk_freq = 3;
546 else if (host->clock <= 162000000)
547 mclk_freq = 4;
548 else if (host->clock <= 175000000)
549 mclk_freq = 5;
550 else if (host->clock <= 187000000)
551 mclk_freq = 6;
552 else if (host->clock <= 200000000)
553 mclk_freq = 7;
554
555 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
556 & ~(7 << 24)) | (mclk_freq << 24)),
557 host->ioaddr + CORE_DLL_CONFIG);
558}
559
 560/* Initialize the DLL (Programmable Delay Line) */
561static int msm_init_cm_dll(struct sdhci_host *host)
562{
563 struct mmc_host *mmc = host->mmc;
564 int rc = 0;
565 unsigned long flags;
566 u32 wait_cnt;
Subhash Jadavaniefb5f622013-05-28 18:21:57 +0530567 bool prev_pwrsave, curr_pwrsave;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700568
569 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
570 spin_lock_irqsave(&host->lock, flags);
Subhash Jadavaniefb5f622013-05-28 18:21:57 +0530571 prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
572 CORE_CLK_PWRSAVE);
573 curr_pwrsave = prev_pwrsave;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700574 /*
575 * Make sure that clock is always enabled when DLL
576 * tuning is in progress. Keeping PWRSAVE ON may
577 * turn off the clock. So let's disable the PWRSAVE
578 * here and re-enable it once tuning is completed.
579 */
Subhash Jadavaniefb5f622013-05-28 18:21:57 +0530580 if (prev_pwrsave) {
581 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
582 & ~CORE_CLK_PWRSAVE),
583 host->ioaddr + CORE_VENDOR_SPEC);
584 curr_pwrsave = false;
585 }
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700586
587 /* Write 1 to DLL_RST bit of DLL_CONFIG register */
588 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
589 | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
590
591 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
592 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
593 | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
594 msm_cm_dll_set_freq(host);
595
596 /* Write 0 to DLL_RST bit of DLL_CONFIG register */
597 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
598 & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
599
600 /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
601 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
602 & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
603
604 /* Set DLL_EN bit to 1. */
605 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
606 | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
607
608 /* Set CK_OUT_EN bit to 1. */
609 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
610 | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
611
612 wait_cnt = 50;
613 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
614 while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
615 CORE_DLL_LOCK)) {
 616 /* wait max. 50us for the LOCK bit to be set */
617 if (--wait_cnt == 0) {
618 pr_err("%s: %s: DLL failed to LOCK\n",
619 mmc_hostname(mmc), __func__);
620 rc = -ETIMEDOUT;
621 goto out;
622 }
623 /* wait for 1us before polling again */
624 udelay(1);
625 }
626
627out:
Subhash Jadavaniefb5f622013-05-28 18:21:57 +0530628 /* Restore the correct PWRSAVE state */
629 if (prev_pwrsave ^ curr_pwrsave) {
630 u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
631
632 if (prev_pwrsave)
633 reg |= CORE_CLK_PWRSAVE;
634 else
635 reg &= ~CORE_CLK_PWRSAVE;
636
637 writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
638 }
639
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700640 spin_unlock_irqrestore(&host->lock, flags);
641 pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
642 return rc;
643}
644
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700645static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
646{
647 u32 wait_cnt;
648 int ret = 0;
649 int cdc_err = 0;
650 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
651 struct sdhci_msm_host *msm_host = pltfm_host->priv;
652
653 pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
654
655 /*
 656 * Retuning in HS400 (DDR mode) will fail; just reset the
657 * tuning block and restore the saved tuning phase.
658 */
659 ret = msm_init_cm_dll(host);
660 if (ret)
661 goto out;
662
663 /* Set the selected phase in delay line hw block */
664 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
665 if (ret)
666 goto out;
667
668 /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
669 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
670 | CORE_CMD_DAT_TRACK_SEL),
671 host->ioaddr + CORE_DLL_CONFIG);
672
673 /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
674 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
675 & ~CORE_CDC_T4_DLY_SEL),
676 host->ioaddr + CORE_DDR_200_CFG);
677
678 /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
679 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
680 & ~CORE_CDC_SWITCH_BYPASS_OFF),
681 host->ioaddr + CORE_CSR_CDC_GEN_CFG);
682
683 /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
684 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
685 | CORE_CDC_SWITCH_RC_EN),
686 host->ioaddr + CORE_CSR_CDC_GEN_CFG);
687
688 /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
689 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
690 & ~CORE_START_CDC_TRAFFIC),
691 host->ioaddr + CORE_DDR_200_CFG);
692
693 /*
694 * Perform CDC Register Initialization Sequence
695 *
696 * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
697 * CORE_CSR_CDC_CTLR_CFG1 0x3011111
698 * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
699 * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
700 * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
701 * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
702 * CORE_CSR_CDC_DELAY_CFG 0x3AC
703 * CORE_CDC_OFFSET_CFG 0x0
704 * CORE_CDC_SLAVE_DDA_CFG 0x16334
705 */
706
707 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
708 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
709 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
710 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
711 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
712 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
713 writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
714 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
715 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
716
717 /* CDC HW Calibration */
718
719 /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
720 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
721 | CORE_SW_TRIG_FULL_CALIB),
722 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
723
724 /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
725 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
726 & ~CORE_SW_TRIG_FULL_CALIB),
727 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
728
729 /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
730 writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
731 | CORE_HW_AUTOCAL_ENA),
732 host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
733
734 /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
735 writel_relaxed((readl_relaxed(host->ioaddr +
736 CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
737 host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
738
739 mb();
740
741 /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
742 wait_cnt = 50;
743 while (!(readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
744 & CORE_CALIBRATION_DONE)) {
 745 /* wait max. 50us for the CALIBRATION_DONE bit to be set */
746 if (--wait_cnt == 0) {
747 pr_err("%s: %s: CDC Calibration was not completed\n",
748 mmc_hostname(host->mmc), __func__);
749 ret = -ETIMEDOUT;
750 goto out;
751 }
752 /* wait for 1us before polling again */
753 udelay(1);
754 }
755
756 /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
757 cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
758 & CORE_CDC_ERROR_CODE_MASK;
759 if (cdc_err) {
760 pr_err("%s: %s: CDC Error Code %d\n",
761 mmc_hostname(host->mmc), __func__, cdc_err);
762 ret = -EINVAL;
763 goto out;
764 }
765
766 /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
767 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
768 | CORE_START_CDC_TRAFFIC),
769 host->ioaddr + CORE_DDR_200_CFG);
770out:
771 pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
772 __func__, ret);
773 return ret;
774}
775
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700776int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
777{
778 unsigned long flags;
Sahitya Tummala714e9642013-06-13 10:36:57 +0530779 int tuning_seq_cnt = 3;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700780 u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
781 const u32 *tuning_block_pattern = tuning_block_64;
782 int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
783 int rc;
784 struct mmc_host *mmc = host->mmc;
Sahitya Tummala00240122013-02-28 19:50:51 +0530785 struct mmc_ios ios = host->mmc->ios;
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700786 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
787 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala00240122013-02-28 19:50:51 +0530788
789 /*
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700790 * Tuning is required only for SDR104, HS200 and HS400 modes,
 791 * and only if the clock frequency is greater than 100MHz.
Sahitya Tummala00240122013-02-28 19:50:51 +0530792 */
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700793 if (host->clock <= CORE_FREQ_100MHZ ||
794 !((ios.timing == MMC_TIMING_MMC_HS400) ||
795 (ios.timing == MMC_TIMING_MMC_HS200) ||
796 (ios.timing == MMC_TIMING_UHS_SDR104)))
Sahitya Tummala00240122013-02-28 19:50:51 +0530797 return 0;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700798
799 pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700800
 801 /* CDCLP533 HW calibration is only required for HS400 mode */
802 if (msm_host->tuning_done && !msm_host->calibration_done &&
803 (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
804 rc = sdhci_msm_cdclp533_calibration(host);
805 spin_lock_irqsave(&host->lock, flags);
806 if (!rc)
807 msm_host->calibration_done = true;
808 spin_unlock_irqrestore(&host->lock, flags);
809 goto out;
810 }
811
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700812 spin_lock_irqsave(&host->lock, flags);
813
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -0700814 if (((opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
815 (opcode == MMC_SEND_TUNING_BLOCK_HS200)) &&
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700816 (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
817 tuning_block_pattern = tuning_block_128;
818 size = sizeof(tuning_block_128);
819 }
820 spin_unlock_irqrestore(&host->lock, flags);
821
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700822 data_buf = kmalloc(size, GFP_KERNEL);
823 if (!data_buf) {
824 rc = -ENOMEM;
825 goto out;
826 }
827
Sahitya Tummala714e9642013-06-13 10:36:57 +0530828retry:
829 /* first of all reset the tuning block */
830 rc = msm_init_cm_dll(host);
831 if (rc)
832 goto kfree;
833
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700834 phase = 0;
835 do {
836 struct mmc_command cmd = {0};
837 struct mmc_data data = {0};
838 struct mmc_request mrq = {
839 .cmd = &cmd,
840 .data = &data
841 };
842 struct scatterlist sg;
843
844 /* set the phase in delay line hw block */
845 rc = msm_config_cm_dll_phase(host, phase);
846 if (rc)
847 goto kfree;
848
849 cmd.opcode = opcode;
850 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
851
852 data.blksz = size;
853 data.blocks = 1;
854 data.flags = MMC_DATA_READ;
855 data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
856
857 data.sg = &sg;
858 data.sg_len = 1;
859 sg_init_one(&sg, data_buf, size);
860 memset(data_buf, 0, size);
861 mmc_wait_for_req(mmc, &mrq);
862
863 if (!cmd.error && !data.error &&
864 !memcmp(data_buf, tuning_block_pattern, size)) {
865 /* tuning is successful at this tuning point */
866 tuned_phases[tuned_phase_cnt++] = phase;
867 pr_debug("%s: %s: found good phase = %d\n",
868 mmc_hostname(mmc), __func__, phase);
869 }
870 } while (++phase < 16);
871
872 if (tuned_phase_cnt) {
873 rc = msm_find_most_appropriate_phase(host, tuned_phases,
874 tuned_phase_cnt);
875 if (rc < 0)
876 goto kfree;
877 else
878 phase = (u8)rc;
879
880 /*
881 * Finally set the selected phase in delay
882 * line hw block.
883 */
884 rc = msm_config_cm_dll_phase(host, phase);
885 if (rc)
886 goto kfree;
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700887 msm_host->saved_tuning_phase = phase;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700888 pr_debug("%s: %s: finally setting the tuning phase to %d\n",
889 mmc_hostname(mmc), __func__, phase);
890 } else {
Sahitya Tummala714e9642013-06-13 10:36:57 +0530891 if (--tuning_seq_cnt)
892 goto retry;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700893 /* tuning failed */
894 pr_err("%s: %s: no tuning point found\n",
895 mmc_hostname(mmc), __func__);
Sahitya Tummala714e9642013-06-13 10:36:57 +0530896 rc = -EIO;
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700897 }
898
899kfree:
900 kfree(data_buf);
901out:
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -0700902 spin_lock_irqsave(&host->lock, flags);
903 if (!rc)
904 msm_host->tuning_done = true;
905 spin_unlock_irqrestore(&host->lock, flags);
906 pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -0700907 return rc;
908}
909
Asutosh Das33a4ff52012-12-18 16:14:02 +0530910static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
911{
912 struct sdhci_msm_gpio_data *curr;
913 int i, ret = 0;
914
915 curr = pdata->pin_data->gpio_data;
916 for (i = 0; i < curr->size; i++) {
917 if (!gpio_is_valid(curr->gpio[i].no)) {
918 ret = -EINVAL;
919 pr_err("%s: Invalid gpio = %d\n", __func__,
920 curr->gpio[i].no);
921 goto free_gpios;
922 }
923 if (enable) {
924 ret = gpio_request(curr->gpio[i].no,
925 curr->gpio[i].name);
926 if (ret) {
927 pr_err("%s: gpio_request(%d, %s) failed %d\n",
928 __func__, curr->gpio[i].no,
929 curr->gpio[i].name, ret);
930 goto free_gpios;
931 }
932 curr->gpio[i].is_enabled = true;
933 } else {
934 gpio_free(curr->gpio[i].no);
935 curr->gpio[i].is_enabled = false;
936 }
937 }
938 return ret;
939
940free_gpios:
941 for (i--; i >= 0; i--) {
942 gpio_free(curr->gpio[i].no);
943 curr->gpio[i].is_enabled = false;
944 }
945 return ret;
946}
947
Asutosh Das390519d2012-12-21 12:21:42 +0530948static int sdhci_msm_setup_pad(struct sdhci_msm_pltfm_data *pdata, bool enable)
949{
950 struct sdhci_msm_pad_data *curr;
951 int i;
952
953 curr = pdata->pin_data->pad_data;
954 for (i = 0; i < curr->drv->size; i++) {
955 if (enable)
956 msm_tlmm_set_hdrive(curr->drv->on[i].no,
957 curr->drv->on[i].val);
958 else
959 msm_tlmm_set_hdrive(curr->drv->off[i].no,
960 curr->drv->off[i].val);
961 }
962
963 for (i = 0; i < curr->pull->size; i++) {
964 if (enable)
965 msm_tlmm_set_pull(curr->pull->on[i].no,
966 curr->pull->on[i].val);
967 else
968 msm_tlmm_set_pull(curr->pull->off[i].no,
969 curr->pull->off[i].val);
970 }
971
972 return 0;
973}
974
Asutosh Das33a4ff52012-12-18 16:14:02 +0530975static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
976{
977 int ret = 0;
978
979 if (!pdata->pin_data || (pdata->pin_data->cfg_sts == enable))
980 return 0;
Asutosh Das390519d2012-12-21 12:21:42 +0530981 if (pdata->pin_data->is_gpio)
982 ret = sdhci_msm_setup_gpio(pdata, enable);
983 else
984 ret = sdhci_msm_setup_pad(pdata, enable);
Asutosh Das33a4ff52012-12-18 16:14:02 +0530985
Asutosh Das33a4ff52012-12-18 16:14:02 +0530986 if (!ret)
987 pdata->pin_data->cfg_sts = enable;
988
989 return ret;
990}
991
Asutosh Das390519d2012-12-21 12:21:42 +0530992static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
993 u32 **out, int *len, u32 size)
994{
995 int ret = 0;
996 struct device_node *np = dev->of_node;
997 size_t sz;
998 u32 *arr = NULL;
999
1000 if (!of_get_property(np, prop_name, len)) {
1001 ret = -EINVAL;
1002 goto out;
1003 }
1004 sz = *len = *len / sizeof(*arr);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07001005 if (sz <= 0 || (size > 0 && (sz > size))) {
Asutosh Das390519d2012-12-21 12:21:42 +05301006 dev_err(dev, "%s invalid size\n", prop_name);
1007 ret = -EINVAL;
1008 goto out;
1009 }
1010
1011 arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
1012 if (!arr) {
1013 dev_err(dev, "%s failed allocating memory\n", prop_name);
1014 ret = -ENOMEM;
1015 goto out;
1016 }
1017
1018 ret = of_property_read_u32_array(np, prop_name, arr, sz);
1019 if (ret < 0) {
1020 dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
1021 goto out;
1022 }
1023 *out = arr;
1024out:
1025 if (ret)
1026 *len = 0;
1027 return ret;
1028}
1029
Asutosh Das33a4ff52012-12-18 16:14:02 +05301030#define MAX_PROP_SIZE 32
1031static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
1032 struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
1033{
1034 int len, ret = 0;
1035 const __be32 *prop;
1036 char prop_name[MAX_PROP_SIZE];
1037 struct sdhci_msm_reg_data *vreg;
1038 struct device_node *np = dev->of_node;
1039
1040 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
1041 if (!of_parse_phandle(np, prop_name, 0)) {
Asutosh Das95afcad2013-06-28 15:03:44 +05301042 dev_info(dev, "No vreg data found for %s\n", vreg_name);
Asutosh Das33a4ff52012-12-18 16:14:02 +05301043 return ret;
1044 }
1045
1046 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
1047 if (!vreg) {
1048 dev_err(dev, "No memory for vreg: %s\n", vreg_name);
1049 ret = -ENOMEM;
1050 return ret;
1051 }
1052
1053 vreg->name = vreg_name;
1054
1055 snprintf(prop_name, MAX_PROP_SIZE,
1056 "qcom,%s-always-on", vreg_name);
1057 if (of_get_property(np, prop_name, NULL))
1058 vreg->is_always_on = true;
1059
1060 snprintf(prop_name, MAX_PROP_SIZE,
1061 "qcom,%s-lpm-sup", vreg_name);
1062 if (of_get_property(np, prop_name, NULL))
1063 vreg->lpm_sup = true;
1064
1065 snprintf(prop_name, MAX_PROP_SIZE,
1066 "qcom,%s-voltage-level", vreg_name);
1067 prop = of_get_property(np, prop_name, &len);
1068 if (!prop || (len != (2 * sizeof(__be32)))) {
1069 dev_warn(dev, "%s %s property\n",
1070 prop ? "invalid format" : "no", prop_name);
1071 } else {
1072 vreg->low_vol_level = be32_to_cpup(&prop[0]);
1073 vreg->high_vol_level = be32_to_cpup(&prop[1]);
1074 }
1075
1076 snprintf(prop_name, MAX_PROP_SIZE,
1077 "qcom,%s-current-level", vreg_name);
1078 prop = of_get_property(np, prop_name, &len);
1079 if (!prop || (len != (2 * sizeof(__be32)))) {
1080 dev_warn(dev, "%s %s property\n",
1081 prop ? "invalid format" : "no", prop_name);
1082 } else {
1083 vreg->lpm_uA = be32_to_cpup(&prop[0]);
1084 vreg->hpm_uA = be32_to_cpup(&prop[1]);
1085 }
1086
1087 *vreg_data = vreg;
1088 dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
1089 vreg->name, vreg->is_always_on ? "always_on," : "",
1090 vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
1091 vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
1092
1093 return ret;
1094}
1095
Asutosh Das390519d2012-12-21 12:21:42 +05301096/* GPIO/Pad data extraction */
1097static int sdhci_msm_dt_get_pad_pull_info(struct device *dev, int id,
1098 struct sdhci_msm_pad_pull_data **pad_pull_data)
1099{
1100 int ret = 0, base = 0, len, i;
1101 u32 *tmp;
1102 struct sdhci_msm_pad_pull_data *pull_data;
1103 struct sdhci_msm_pad_pull *pull;
1104
1105 switch (id) {
1106 case 1:
1107 base = TLMM_PULL_SDC1_CLK;
1108 break;
1109 case 2:
1110 base = TLMM_PULL_SDC2_CLK;
1111 break;
1112 case 3:
1113 base = TLMM_PULL_SDC3_CLK;
1114 break;
1115 case 4:
1116 base = TLMM_PULL_SDC4_CLK;
1117 break;
1118 default:
1119 dev_err(dev, "%s: Invalid slot id\n", __func__);
1120 ret = -EINVAL;
1121 goto out;
1122 }
1123
1124 pull_data = devm_kzalloc(dev, sizeof(struct sdhci_msm_pad_pull_data),
1125 GFP_KERNEL);
1126 if (!pull_data) {
1127 dev_err(dev, "No memory for msm_mmc_pad_pull_data\n");
1128 ret = -ENOMEM;
1129 goto out;
1130 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07001131 pull_data->size = 4; /* array size for clk, cmd, data and rclk */
Asutosh Das390519d2012-12-21 12:21:42 +05301132
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07001133 /* Allocate on, off configs for clk, cmd, data and rclk */
Asutosh Das390519d2012-12-21 12:21:42 +05301134 pull = devm_kzalloc(dev, 2 * pull_data->size *\
1135 sizeof(struct sdhci_msm_pad_pull), GFP_KERNEL);
1136 if (!pull) {
1137 dev_err(dev, "No memory for msm_mmc_pad_pull\n");
1138 ret = -ENOMEM;
1139 goto out;
1140 }
1141 pull_data->on = pull;
1142 pull_data->off = pull + pull_data->size;
1143
1144 ret = sdhci_msm_dt_get_array(dev, "qcom,pad-pull-on",
1145 &tmp, &len, pull_data->size);
1146 if (ret)
1147 goto out;
1148
1149 for (i = 0; i < len; i++) {
1150 pull_data->on[i].no = base + i;
1151 pull_data->on[i].val = tmp[i];
1152 dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
1153 i, pull_data->on[i].val);
1154 }
1155
1156 ret = sdhci_msm_dt_get_array(dev, "qcom,pad-pull-off",
1157 &tmp, &len, pull_data->size);
1158 if (ret)
1159 goto out;
1160
1161 for (i = 0; i < len; i++) {
1162 pull_data->off[i].no = base + i;
1163 pull_data->off[i].val = tmp[i];
1164 dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
1165 i, pull_data->off[i].val);
1166 }
1167
1168 *pad_pull_data = pull_data;
1169out:
1170 return ret;
1171}
1172
1173static int sdhci_msm_dt_get_pad_drv_info(struct device *dev, int id,
1174 struct sdhci_msm_pad_drv_data **pad_drv_data)
1175{
1176 int ret = 0, base = 0, len, i;
1177 u32 *tmp;
1178 struct sdhci_msm_pad_drv_data *drv_data;
1179 struct sdhci_msm_pad_drv *drv;
1180
1181 switch (id) {
1182 case 1:
1183 base = TLMM_HDRV_SDC1_CLK;
1184 break;
1185 case 2:
1186 base = TLMM_HDRV_SDC2_CLK;
1187 break;
1188 case 3:
1189 base = TLMM_HDRV_SDC3_CLK;
1190 break;
1191 case 4:
1192 base = TLMM_HDRV_SDC4_CLK;
1193 break;
1194 default:
1195 dev_err(dev, "%s: Invalid slot id\n", __func__);
1196 ret = -EINVAL;
1197 goto out;
1198 }
1199
1200 drv_data = devm_kzalloc(dev, sizeof(struct sdhci_msm_pad_drv_data),
1201 GFP_KERNEL);
1202 if (!drv_data) {
1203 dev_err(dev, "No memory for msm_mmc_pad_drv_data\n");
1204 ret = -ENOMEM;
1205 goto out;
1206 }
1207 drv_data->size = 3; /* array size for clk, cmd, data */
1208
1209 /* Allocate on, off configs for clk, cmd, data */
1210 drv = devm_kzalloc(dev, 2 * drv_data->size *\
1211 sizeof(struct sdhci_msm_pad_drv), GFP_KERNEL);
1212 if (!drv) {
1213 dev_err(dev, "No memory msm_mmc_pad_drv\n");
1214 ret = -ENOMEM;
1215 goto out;
1216 }
1217 drv_data->on = drv;
1218 drv_data->off = drv + drv_data->size;
1219
1220 ret = sdhci_msm_dt_get_array(dev, "qcom,pad-drv-on",
1221 &tmp, &len, drv_data->size);
1222 if (ret)
1223 goto out;
1224
1225 for (i = 0; i < len; i++) {
1226 drv_data->on[i].no = base + i;
1227 drv_data->on[i].val = tmp[i];
1228 dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
1229 i, drv_data->on[i].val);
1230 }
1231
1232 ret = sdhci_msm_dt_get_array(dev, "qcom,pad-drv-off",
1233 &tmp, &len, drv_data->size);
1234 if (ret)
1235 goto out;
1236
1237 for (i = 0; i < len; i++) {
1238 drv_data->off[i].no = base + i;
1239 drv_data->off[i].val = tmp[i];
1240 dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
1241 i, drv_data->off[i].val);
1242 }
1243
1244 *pad_drv_data = drv_data;
1245out:
1246 return ret;
1247}
1248
Asutosh Das33a4ff52012-12-18 16:14:02 +05301249#define GPIO_NAME_MAX_LEN 32
1250static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
1251 struct sdhci_msm_pltfm_data *pdata)
1252{
Asutosh Das390519d2012-12-21 12:21:42 +05301253 int ret = 0, id = 0, cnt, i;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301254 struct sdhci_msm_pin_data *pin_data;
1255 struct device_node *np = dev->of_node;
1256
1257 pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
1258 if (!pin_data) {
1259 dev_err(dev, "No memory for pin_data\n");
1260 ret = -ENOMEM;
1261 goto out;
1262 }
1263
1264 cnt = of_gpio_count(np);
1265 if (cnt > 0) {
Asutosh Das390519d2012-12-21 12:21:42 +05301266 pin_data->is_gpio = true;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301267 pin_data->gpio_data = devm_kzalloc(dev,
1268 sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
1269 if (!pin_data->gpio_data) {
1270 dev_err(dev, "No memory for gpio_data\n");
1271 ret = -ENOMEM;
1272 goto out;
1273 }
1274 pin_data->gpio_data->size = cnt;
1275 pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
1276 sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
1277
1278 if (!pin_data->gpio_data->gpio) {
1279 dev_err(dev, "No memory for gpio\n");
1280 ret = -ENOMEM;
1281 goto out;
1282 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301283 for (i = 0; i < cnt; i++) {
1284 const char *name = NULL;
1285 char result[GPIO_NAME_MAX_LEN];
1286 pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
1287 of_property_read_string_index(np,
1288 "qcom,gpio-names", i, &name);
1289
1290 snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
1291 dev_name(dev), name ? name : "?");
1292 pin_data->gpio_data->gpio[i].name = result;
1293 dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
Asutosh Das390519d2012-12-21 12:21:42 +05301294 pin_data->gpio_data->gpio[i].name,
1295 pin_data->gpio_data->gpio[i].no);
Asutosh Das33a4ff52012-12-18 16:14:02 +05301296 }
Asutosh Das390519d2012-12-21 12:21:42 +05301297 } else {
1298 pin_data->pad_data =
1299 devm_kzalloc(dev,
1300 sizeof(struct sdhci_msm_pad_data),
1301 GFP_KERNEL);
1302 if (!pin_data->pad_data) {
1303 dev_err(dev,
1304 "No memory for pin_data->pad_data\n");
1305 ret = -ENOMEM;
1306 goto out;
1307 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301308
Asutosh Das390519d2012-12-21 12:21:42 +05301309 ret = of_alias_get_id(np, "sdhc");
1310 if (ret < 0) {
1311 dev_err(dev, "Failed to get slot index %d\n", ret);
1312 goto out;
1313 }
1314 id = ret;
1315
1316 ret = sdhci_msm_dt_get_pad_pull_info(
1317 dev, id, &pin_data->pad_data->pull);
1318 if (ret)
1319 goto out;
1320 ret = sdhci_msm_dt_get_pad_drv_info(
1321 dev, id, &pin_data->pad_data->drv);
1322 if (ret)
1323 goto out;
1324
1325 }
1326 pdata->pin_data = pin_data;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301327out:
1328 if (ret)
1329 dev_err(dev, "%s failed with err %d\n", __func__, ret);
1330 return ret;
1331}
1332
1333/* Parse platform data */
1334static struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev)
1335{
1336 struct sdhci_msm_pltfm_data *pdata = NULL;
1337 struct device_node *np = dev->of_node;
1338 u32 bus_width = 0;
Sahitya Tummalab4e84042013-03-10 07:03:17 +05301339 u32 cpu_dma_latency;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301340 int len, i;
Sahitya Tummala00240122013-02-28 19:50:51 +05301341 int clk_table_len;
1342 u32 *clk_table = NULL;
Sujit Reddy Thumma4ddff322013-06-03 09:54:32 +05301343 enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301344
1345 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1346 if (!pdata) {
1347 dev_err(dev, "failed to allocate memory for platform data\n");
1348 goto out;
1349 }
1350
Sujit Reddy Thumma4ddff322013-06-03 09:54:32 +05301351 pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
 1352 if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
1353 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
Sahitya Tummala62448d92013-03-12 14:57:46 +05301354
Asutosh Das33a4ff52012-12-18 16:14:02 +05301355 of_property_read_u32(np, "qcom,bus-width", &bus_width);
1356 if (bus_width == 8)
1357 pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
1358 else if (bus_width == 4)
1359 pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
1360 else {
1361 dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
1362 pdata->mmc_bus_width = 0;
1363 }
1364
Sahitya Tummalab4e84042013-03-10 07:03:17 +05301365 if (!of_property_read_u32(np, "qcom,cpu-dma-latency-us",
1366 &cpu_dma_latency))
1367 pdata->cpu_dma_latency_us = cpu_dma_latency;
1368
Sahitya Tummala00240122013-02-28 19:50:51 +05301369 if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
1370 &clk_table, &clk_table_len, 0)) {
1371 dev_err(dev, "failed parsing supported clock rates\n");
1372 goto out;
1373 }
1374 if (!clk_table || !clk_table_len) {
1375 dev_err(dev, "Invalid clock table\n");
1376 goto out;
1377 }
1378 pdata->sup_clk_table = clk_table;
1379 pdata->sup_clk_cnt = clk_table_len;
1380
Asutosh Das33a4ff52012-12-18 16:14:02 +05301381 pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
1382 sdhci_msm_slot_reg_data),
1383 GFP_KERNEL);
1384 if (!pdata->vreg_data) {
1385 dev_err(dev, "failed to allocate memory for vreg data\n");
1386 goto out;
1387 }
1388
1389 if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
1390 "vdd")) {
1391 dev_err(dev, "failed parsing vdd data\n");
1392 goto out;
1393 }
1394 if (sdhci_msm_dt_parse_vreg_info(dev,
1395 &pdata->vreg_data->vdd_io_data,
1396 "vdd-io")) {
1397 dev_err(dev, "failed parsing vdd-io data\n");
1398 goto out;
1399 }
1400
1401 if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
1402 dev_err(dev, "failed parsing gpio data\n");
1403 goto out;
1404 }
1405
Asutosh Das33a4ff52012-12-18 16:14:02 +05301406 len = of_property_count_strings(np, "qcom,bus-speed-mode");
1407
1408 for (i = 0; i < len; i++) {
1409 const char *name = NULL;
1410
1411 of_property_read_string_index(np,
1412 "qcom,bus-speed-mode", i, &name);
1413 if (!name)
1414 continue;
1415
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07001416 if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
1417 pdata->caps2 |= MMC_CAP2_HS400_1_8V;
1418 else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
1419 pdata->caps2 |= MMC_CAP2_HS400_1_2V;
1420 else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
Asutosh Das33a4ff52012-12-18 16:14:02 +05301421 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1422 else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
1423 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1424 else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
1425 pdata->caps |= MMC_CAP_1_8V_DDR
1426 | MMC_CAP_UHS_DDR50;
1427 else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
1428 pdata->caps |= MMC_CAP_1_2V_DDR
1429 | MMC_CAP_UHS_DDR50;
1430 }
1431
1432 if (of_get_property(np, "qcom,nonremovable", NULL))
1433 pdata->nonremovable = true;
1434
1435 return pdata;
1436out:
1437 return NULL;
1438}
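/*
 * Illustrative device tree fragment for the properties parsed above.
 * All values, the compatible string and the node/regulator names are
 * assumptions; only the property names come from this parser:
 *
 *	sdhc_1: sdhci@f9824900 {
 *		compatible = "qcom,sdhci-msm";
 *		cd-gpios = <&msmgpio 62 0x1>;
 *		qcom,bus-width = <8>;
 *		qcom,cpu-dma-latency-us = <200>;
 *		qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
 *		qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 *		qcom,nonremovable;
 *		vdd-supply = <&pm8941_l20>;
 *		qcom,vdd-voltage-level = <2950000 2950000>;
 *		qcom,vdd-current-level = <800 500000>;
 *		vdd-io-supply = <&pm8941_s3>;
 *		qcom,vdd-io-voltage-level = <1800000 2950000>;
 *		qcom,vdd-io-current-level = <250 154000>;
 *	};
 */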
1439
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301440/* Returns required bandwidth in Bytes per Sec */
1441static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
1442 struct mmc_ios *ios)
1443{
Sahitya Tummala53aff982013-04-03 18:03:31 +05301444 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1445 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1446
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301447 unsigned int bw;
1448
Sahitya Tummala53aff982013-04-03 18:03:31 +05301449 bw = msm_host->clk_rate;
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301450 /*
 1451 * For DDR mode, the SDCC controller clock runs at double
 1452 * the rate of the actual clock that goes to the card.
1453 */
1454 if (ios->bus_width == MMC_BUS_WIDTH_4)
1455 bw /= 2;
1456 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1457 bw /= 8;
1458
1459 return bw;
1460}
1461
1462static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
1463 unsigned int bw)
1464{
1465 unsigned int *table = host->pdata->voting_data->bw_vecs;
1466 unsigned int size = host->pdata->voting_data->bw_vecs_size;
1467 int i;
1468
1469 if (host->msm_bus_vote.is_max_bw_needed && bw)
1470 return host->msm_bus_vote.max_bw_vote;
1471
1472 for (i = 0; i < size; i++) {
1473 if (bw <= table[i])
1474 break;
1475 }
1476
1477 if (i && (i == size))
1478 i--;
1479
1480 return i;
1481}
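/*
 * Worked example (hypothetical bw_vecs table from DT): with
 * bw_vecs[] = {0, 100000000, 200000000, 400000000}, a request for
 * 150000000 Bytes/s breaks out of the loop at i = 2 and vote index 2
 * is returned; a request larger than every entry ends with i == size
 * and is clamped to the last index.
 */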
1482
1483/*
1484 * This function must be called with host lock acquired.
1485 * Caller of this function should also ensure that msm bus client
1486 * handle is not null.
1487 */
1488static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
1489 int vote,
1490 unsigned long flags)
1491{
1492 struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
1493 int rc = 0;
1494
1495 if (vote != msm_host->msm_bus_vote.curr_vote) {
1496 spin_unlock_irqrestore(&host->lock, flags);
1497 rc = msm_bus_scale_client_update_request(
1498 msm_host->msm_bus_vote.client_handle, vote);
1499 spin_lock_irqsave(&host->lock, flags);
1500 if (rc) {
1501 pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1502 mmc_hostname(host->mmc),
1503 msm_host->msm_bus_vote.client_handle, vote, rc);
1504 goto out;
1505 }
1506 msm_host->msm_bus_vote.curr_vote = vote;
1507 }
1508out:
1509 return rc;
1510}
1511
1512/*
1513 * Internal work. Work to set 0 bandwidth for msm bus.
1514 */
1515static void sdhci_msm_bus_work(struct work_struct *work)
1516{
1517 struct sdhci_msm_host *msm_host;
1518 struct sdhci_host *host;
1519 unsigned long flags;
1520
1521 msm_host = container_of(work, struct sdhci_msm_host,
1522 msm_bus_vote.vote_work.work);
1523 host = platform_get_drvdata(msm_host->pdev);
1524
1525 if (!msm_host->msm_bus_vote.client_handle)
1526 return;
1527
1528 spin_lock_irqsave(&host->lock, flags);
1529 /* don't vote for 0 bandwidth if any request is in progress */
1530 if (!host->mrq) {
1531 sdhci_msm_bus_set_vote(msm_host,
1532 msm_host->msm_bus_vote.min_bw_vote, flags);
1533 } else
1534 pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
1535 mmc_hostname(host->mmc), __func__);
1536 spin_unlock_irqrestore(&host->lock, flags);
1537}
1538
1539/*
1540 * This function cancels any scheduled delayed work and sets the bus
1541 * vote based on bw (bandwidth) argument.
1542 */
1543static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
1544 unsigned int bw)
1545{
1546 int vote;
1547 unsigned long flags;
1548 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1549 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1550
1551 cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
1552 spin_lock_irqsave(&host->lock, flags);
1553 vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
1554 sdhci_msm_bus_set_vote(msm_host, vote, flags);
1555 spin_unlock_irqrestore(&host->lock, flags);
1556}
1557
1558#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
1559
 1560/* This function queues a work which will set the bandwidth requirement to 0 */
1561static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
1562{
1563 unsigned long flags;
1564 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1565 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1566
1567 spin_lock_irqsave(&host->lock, flags);
1568 if (msm_host->msm_bus_vote.min_bw_vote !=
1569 msm_host->msm_bus_vote.curr_vote)
1570 queue_delayed_work(system_nrt_wq,
1571 &msm_host->msm_bus_vote.vote_work,
1572 msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
1573 spin_unlock_irqrestore(&host->lock, flags);
1574}
1575
1576static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
1577 struct platform_device *pdev)
1578{
1579 int rc = 0;
1580 struct msm_bus_scale_pdata *bus_pdata;
1581
1582 struct sdhci_msm_bus_voting_data *data;
1583 struct device *dev = &pdev->dev;
1584
1585 data = devm_kzalloc(dev,
1586 sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
1587 if (!data) {
1588 dev_err(&pdev->dev,
1589 "%s: failed to allocate memory\n", __func__);
1590 rc = -ENOMEM;
1591 goto out;
1592 }
1593 data->bus_pdata = msm_bus_cl_get_pdata(pdev);
1594 if (data->bus_pdata) {
1595 rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
1596 &data->bw_vecs, &data->bw_vecs_size, 0);
1597 if (rc) {
1598 dev_err(&pdev->dev,
1599 "%s: Failed to get bus-bw-vectors-bps\n",
1600 __func__);
1601 goto out;
1602 }
1603 host->pdata->voting_data = data;
1604 }
1605 if (host->pdata->voting_data &&
1606 host->pdata->voting_data->bus_pdata &&
1607 host->pdata->voting_data->bw_vecs &&
1608 host->pdata->voting_data->bw_vecs_size) {
1609
1610 bus_pdata = host->pdata->voting_data->bus_pdata;
1611 host->msm_bus_vote.client_handle =
1612 msm_bus_scale_register_client(bus_pdata);
1613 if (!host->msm_bus_vote.client_handle) {
1614 dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
1615 rc = -EFAULT;
1616 goto out;
1617 }
1618 /* cache the vote index for minimum and maximum bandwidth */
1619 host->msm_bus_vote.min_bw_vote =
1620 sdhci_msm_bus_get_vote_for_bw(host, 0);
1621 host->msm_bus_vote.max_bw_vote =
1622 sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
1623 } else {
1624 devm_kfree(dev, data);
1625 }
1626
1627out:
1628 return rc;
1629}
1630
1631static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
1632{
1633 if (host->msm_bus_vote.client_handle)
1634 msm_bus_scale_unregister_client(
1635 host->msm_bus_vote.client_handle);
1636}
1637
1638static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
1639{
1640 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1641 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1642 struct mmc_ios *ios = &host->mmc->ios;
1643 unsigned int bw;
1644
1645 if (!msm_host->msm_bus_vote.client_handle)
1646 return;
1647
1648 bw = sdhci_get_bw_required(host, ios);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05301649 if (enable) {
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301650 sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05301651 } else {
1652 /*
1653 * If clock gating is enabled, then remove the vote
1654 * immediately because clocks will be disabled only
1655 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
1656 * additional delay is required to remove the bus vote.
1657 */
1658 if (host->mmc->clkgate_delay)
1659 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
1660 else
1661 sdhci_msm_bus_queue_work(host);
1662 }
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05301663}
1664
Asutosh Das33a4ff52012-12-18 16:14:02 +05301665/* Regulator utility functions */
1666static int sdhci_msm_vreg_init_reg(struct device *dev,
1667 struct sdhci_msm_reg_data *vreg)
1668{
1669 int ret = 0;
1670
 1671	/* Check if the regulator is already initialized */
1672 if (vreg->reg)
1673 goto out;
1674
1675 /* Get the regulator handle */
1676 vreg->reg = devm_regulator_get(dev, vreg->name);
1677 if (IS_ERR(vreg->reg)) {
1678 ret = PTR_ERR(vreg->reg);
1679 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1680 __func__, vreg->name, ret);
1681 goto out;
1682 }
1683
Asutosh Das95afcad2013-06-28 15:03:44 +05301684 if (regulator_count_voltages(vreg->reg) > 0) {
1685 vreg->set_voltage_sup = true;
1686 /* sanity check */
1687 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1688 pr_err("%s: %s invalid constraints specified\n",
1689 __func__, vreg->name);
1690 ret = -EINVAL;
1691 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301692 }
1693
1694out:
1695 return ret;
1696}
1697
1698static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
1699{
1700 if (vreg->reg)
1701 devm_regulator_put(vreg->reg);
1702}
1703
1704static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
1705 *vreg, int uA_load)
1706{
1707 int ret = 0;
1708
1709 /*
1710 * regulators that do not support regulator_set_voltage also
1711 * do not support regulator_set_optimum_mode
1712 */
Asutosh Das95afcad2013-06-28 15:03:44 +05301713 if (vreg->set_voltage_sup) {
1714 ret = regulator_set_optimum_mode(vreg->reg, uA_load);
1715 if (ret < 0)
1716 pr_err("%s: regulator_set_optimum_mode(reg=%s,uA_load=%d) failed. ret=%d\n",
Asutosh Das33a4ff52012-12-18 16:14:02 +05301717 __func__, vreg->name, uA_load, ret);
1718 else
1719 /*
 1720			 * regulator_set_optimum_mode() can return a non-zero
 1721			 * value even on success.
1722 */
1723 ret = 0;
Asutosh Das95afcad2013-06-28 15:03:44 +05301724 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301725 return ret;
1726}
1727
1728static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
1729 int min_uV, int max_uV)
1730{
1731 int ret = 0;
Asutosh Das95afcad2013-06-28 15:03:44 +05301732 if (vreg->set_voltage_sup) {
1733 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
1734 if (ret) {
 1735			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das33a4ff52012-12-18 16:14:02 +05301736 __func__, vreg->name, min_uV, max_uV, ret);
1737 }
Asutosh Das95afcad2013-06-28 15:03:44 +05301738 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301739
1740 return ret;
1741}
1742
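/* Put the regulator in HPM, program its operating voltage and enable it */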
1743static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
1744{
1745 int ret = 0;
1746
1747 /* Put regulator in HPM (high power mode) */
1748 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
1749 if (ret < 0)
1750 return ret;
1751
1752 if (!vreg->is_enabled) {
1753 /* Set voltage level */
1754 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
1755 vreg->high_vol_level);
1756 if (ret)
1757 return ret;
1758 }
1759 ret = regulator_enable(vreg->reg);
1760 if (ret) {
1761 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
1762 __func__, vreg->name, ret);
1763 return ret;
1764 }
1765 vreg->is_enabled = true;
1766 return ret;
1767}
1768
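/*
 * Disable the regulator unless it is marked always-on, in which case it is
 * only switched to LPM (when supported).
 */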
1769static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
1770{
1771 int ret = 0;
1772
1773 /* Never disable regulator marked as always_on */
1774 if (vreg->is_enabled && !vreg->is_always_on) {
1775 ret = regulator_disable(vreg->reg);
1776 if (ret) {
1777 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
1778 __func__, vreg->name, ret);
1779 goto out;
1780 }
1781 vreg->is_enabled = false;
1782
1783 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
1784 if (ret < 0)
1785 goto out;
1786
1787 /* Set min. voltage level to 0 */
1788 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
1789 if (ret)
1790 goto out;
1791 } else if (vreg->is_enabled && vreg->is_always_on) {
1792 if (vreg->lpm_sup) {
1793 /* Put always_on regulator in LPM (low power mode) */
1794 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
1795 vreg->lpm_uA);
1796 if (ret < 0)
1797 goto out;
1798 }
1799 }
1800out:
1801 return ret;
1802}
1803
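/*
 * Enable or disable both the vdd and vdd-io regulators of the slot. When no
 * vreg data is available, the slot is assumed to be powered by an always-on
 * domain and this is a no-op.
 */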
1804static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
1805 bool enable, bool is_init)
1806{
1807 int ret = 0, i;
1808 struct sdhci_msm_slot_reg_data *curr_slot;
1809 struct sdhci_msm_reg_data *vreg_table[2];
1810
1811 curr_slot = pdata->vreg_data;
1812 if (!curr_slot) {
 1813		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
1814 __func__);
1815 goto out;
1816 }
1817
1818 vreg_table[0] = curr_slot->vdd_data;
1819 vreg_table[1] = curr_slot->vdd_io_data;
1820
1821 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
1822 if (vreg_table[i]) {
1823 if (enable)
1824 ret = sdhci_msm_vreg_enable(vreg_table[i]);
1825 else
1826 ret = sdhci_msm_vreg_disable(vreg_table[i]);
1827 if (ret)
1828 goto out;
1829 }
1830 }
1831out:
1832 return ret;
1833}
1834
1835/*
1836 * Reset vreg by ensuring it is off during probe. A call
 1837 * to enable vreg is needed to balance the disable vreg call.
1838 */
1839static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
1840{
1841 int ret;
1842
1843 ret = sdhci_msm_setup_vreg(pdata, 1, true);
1844 if (ret)
1845 return ret;
1846 ret = sdhci_msm_setup_vreg(pdata, 0, true);
1847 return ret;
1848}
1849
1850/* This init function should be called only once for each SDHC slot */
1851static int sdhci_msm_vreg_init(struct device *dev,
1852 struct sdhci_msm_pltfm_data *pdata,
1853 bool is_init)
1854{
1855 int ret = 0;
1856 struct sdhci_msm_slot_reg_data *curr_slot;
1857 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
1858
1859 curr_slot = pdata->vreg_data;
1860 if (!curr_slot)
1861 goto out;
1862
1863 curr_vdd_reg = curr_slot->vdd_data;
1864 curr_vdd_io_reg = curr_slot->vdd_io_data;
1865
1866 if (!is_init)
1867 /* Deregister all regulators from regulator framework */
1868 goto vdd_io_reg_deinit;
1869
1870 /*
1871 * Get the regulator handle from voltage regulator framework
1872 * and then try to set the voltage level for the regulator
1873 */
1874 if (curr_vdd_reg) {
1875 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
1876 if (ret)
1877 goto out;
1878 }
1879 if (curr_vdd_io_reg) {
1880 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
1881 if (ret)
1882 goto vdd_reg_deinit;
1883 }
1884 ret = sdhci_msm_vreg_reset(pdata);
1885 if (ret)
1886 dev_err(dev, "vreg reset failed (%d)\n", ret);
1887 goto out;
1888
1889vdd_io_reg_deinit:
1890 if (curr_vdd_io_reg)
1891 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
1892vdd_reg_deinit:
1893 if (curr_vdd_reg)
1894 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
1895out:
1896 return ret;
1897}
1898
1899
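/*
 * Set the vdd-io regulator to the low/high level cached from DT, or to an
 * explicit voltage when level is VDD_IO_SET_LEVEL. Does nothing if the
 * regulator is not present or not enabled.
 */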
1900static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
1901 enum vdd_io_level level,
1902 unsigned int voltage_level)
1903{
1904 int ret = 0;
1905 int set_level;
1906 struct sdhci_msm_reg_data *vdd_io_reg;
1907
1908 if (!pdata->vreg_data)
1909 return ret;
1910
1911 vdd_io_reg = pdata->vreg_data->vdd_io_data;
1912 if (vdd_io_reg && vdd_io_reg->is_enabled) {
1913 switch (level) {
1914 case VDD_IO_LOW:
1915 set_level = vdd_io_reg->low_vol_level;
1916 break;
1917 case VDD_IO_HIGH:
1918 set_level = vdd_io_reg->high_vol_level;
1919 break;
1920 case VDD_IO_SET_LEVEL:
1921 set_level = voltage_level;
1922 break;
1923 default:
 1924			pr_err("%s: invalid argument level = %d\n",
1925 __func__, level);
1926 ret = -EINVAL;
1927 return ret;
1928 }
1929 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
1930 set_level);
1931 }
1932 return ret;
1933}
1934
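/*
 * Threaded PWRCTL interrupt handler: read and clear CORE_PWRCTL_STATUS,
 * perform the requested bus power and IO voltage switches, ack the result
 * via CORE_PWRCTL_CTL and update curr_pwr_state/curr_io_level before
 * completing pwr_irq_completion.
 */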
1935static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1936{
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07001937 struct sdhci_host *host = (struct sdhci_host *)data;
1938 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1939 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301940 u8 irq_status = 0;
1941 u8 irq_ack = 0;
1942 int ret = 0;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301943 int pwr_state = 0, io_level = 0;
1944 unsigned long flags;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301945
1946 irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
1947 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
1948 mmc_hostname(msm_host->mmc), irq, irq_status);
1949
1950 /* Clear the interrupt */
1951 writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
1952 /*
 1953	 * SDHC has core_mem and hc_mem device memory and these memory
 1954	 * addresses do not fall within the same 1KB region. Hence, any update
 1955	 * to the core_mem address space requires an mb() to ensure it gets
 1956	 * completed before the next update to registers within hc_mem.
1957 */
1958 mb();
1959
1960 /* Handle BUS ON/OFF*/
1961 if (irq_status & CORE_PWRCTL_BUS_ON) {
1962 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301963 if (!ret) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05301964 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301965 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
1966 VDD_IO_HIGH, 0);
1967 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301968 if (ret)
1969 irq_ack |= CORE_PWRCTL_BUS_FAIL;
1970 else
1971 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301972
1973 pwr_state = REQ_BUS_ON;
1974 io_level = REQ_IO_HIGH;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301975 }
1976 if (irq_status & CORE_PWRCTL_BUS_OFF) {
1977 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301978 if (!ret) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05301979 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301980 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
1981 VDD_IO_LOW, 0);
1982 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301983 if (ret)
1984 irq_ack |= CORE_PWRCTL_BUS_FAIL;
1985 else
1986 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301987
1988 pwr_state = REQ_BUS_OFF;
1989 io_level = REQ_IO_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301990 }
1991 /* Handle IO LOW/HIGH */
1992 if (irq_status & CORE_PWRCTL_IO_LOW) {
1993 /* Switch voltage Low */
1994 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
1995 if (ret)
1996 irq_ack |= CORE_PWRCTL_IO_FAIL;
1997 else
1998 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301999
2000 io_level = REQ_IO_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302001 }
2002 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2003 /* Switch voltage High */
2004 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2005 if (ret)
2006 irq_ack |= CORE_PWRCTL_IO_FAIL;
2007 else
2008 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302009
2010 io_level = REQ_IO_HIGH;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302011 }
2012
2013 /* ACK status to the core */
2014 writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
2015 /*
 2016	 * SDHC has core_mem and hc_mem device memory and these memory
 2017	 * addresses do not fall within the same 1KB region. Hence, any update
 2018	 * to the core_mem address space requires an mb() to ensure it gets
 2019	 * completed before the next update to registers within hc_mem.
2020 */
2021 mb();
2022
Sahitya Tummala179e7382013-03-20 19:24:01 +05302023 if (io_level & REQ_IO_HIGH)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002024 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2025 ~CORE_IO_PAD_PWR_SWITCH),
2026 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala179e7382013-03-20 19:24:01 +05302027 else if (io_level & REQ_IO_LOW)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002028 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
2029 CORE_IO_PAD_PWR_SWITCH),
2030 host->ioaddr + CORE_VENDOR_SPEC);
2031 mb();
2032
Asutosh Das33a4ff52012-12-18 16:14:02 +05302033 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2034 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala179e7382013-03-20 19:24:01 +05302035 spin_lock_irqsave(&host->lock, flags);
2036 if (pwr_state)
2037 msm_host->curr_pwr_state = pwr_state;
2038 if (io_level)
2039 msm_host->curr_io_level = io_level;
2040 complete(&msm_host->pwr_irq_completion);
2041 spin_unlock_irqrestore(&host->lock, flags);
2042
Asutosh Das33a4ff52012-12-18 16:14:02 +05302043 return IRQ_HANDLED;
2044}
2045
2046/* This function returns the max. current supported by VDD rail in mA */
2047static unsigned int sdhci_msm_get_vreg_vdd_max_current(struct sdhci_msm_host
2048 *host)
2049{
2050 struct sdhci_msm_slot_reg_data *curr_slot = host->pdata->vreg_data;
2051 if (!curr_slot)
2052 return 0;
2053 if (curr_slot->vdd_data)
2054 return curr_slot->vdd_data->hpm_uA / 1000;
2055 else
2056 return 0;
2057}
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302058
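/*
 * sysfs handlers for the "polling" attribute: expose and control
 * MMC_CAP_NEEDS_POLL so that card detect polling can be toggled at runtime
 * on slots without a card detect GPIO.
 */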
2059static ssize_t
2060show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2061{
2062 struct sdhci_host *host = dev_get_drvdata(dev);
2063 int poll;
2064 unsigned long flags;
2065
2066 spin_lock_irqsave(&host->lock, flags);
2067 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2068 spin_unlock_irqrestore(&host->lock, flags);
2069
2070 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2071}
2072
2073static ssize_t
2074store_polling(struct device *dev, struct device_attribute *attr,
2075 const char *buf, size_t count)
2076{
2077 struct sdhci_host *host = dev_get_drvdata(dev);
2078 int value;
 2079	u32 value;
2080
2081 if (!kstrtou32(buf, 0, &value)) {
2082 spin_lock_irqsave(&host->lock, flags);
2083 if (value) {
2084 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2085 mmc_detect_change(host->mmc, 0);
2086 } else {
2087 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2088 }
2089 spin_unlock_irqrestore(&host->lock, flags);
2090 }
2091 return count;
2092}
2093
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302094static ssize_t
2095show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2096 char *buf)
2097{
2098 struct sdhci_host *host = dev_get_drvdata(dev);
2099 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2100 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2101
2102 return snprintf(buf, PAGE_SIZE, "%u\n",
2103 msm_host->msm_bus_vote.is_max_bw_needed);
2104}
2105
2106static ssize_t
2107store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2108 const char *buf, size_t count)
2109{
2110 struct sdhci_host *host = dev_get_drvdata(dev);
2111 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2112 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2113 uint32_t value;
2114 unsigned long flags;
2115
2116 if (!kstrtou32(buf, 0, &value)) {
2117 spin_lock_irqsave(&host->lock, flags);
2118 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2119 spin_unlock_irqrestore(&host->lock, flags);
2120 }
2121 return count;
2122}
Asutosh Das33a4ff52012-12-18 16:14:02 +05302123
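/*
 * Wait for the power irq handler to service the requested bus power or IO
 * voltage change (req_type), unless the current state indicates that the
 * request has already been handled.
 */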
Sahitya Tummala179e7382013-03-20 19:24:01 +05302124static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das33a4ff52012-12-18 16:14:02 +05302125{
2126 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2127 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302128 unsigned long flags;
2129 bool done = false;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302130
Sahitya Tummala179e7382013-03-20 19:24:01 +05302131 spin_lock_irqsave(&host->lock, flags);
2132 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2133 mmc_hostname(host->mmc), __func__, req_type,
2134 msm_host->curr_pwr_state, msm_host->curr_io_level);
2135 if ((req_type & msm_host->curr_pwr_state) ||
2136 (req_type & msm_host->curr_io_level))
2137 done = true;
2138 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302139
Sahitya Tummala179e7382013-03-20 19:24:01 +05302140 /*
 2141	 * This is needed here to handle a case where the IRQ gets
 2142	 * triggered even before this function is called, so that
 2143	 * the x->done counter of the completion gets reset. Otherwise,
 2144	 * the next call to wait_for_completion returns immediately
2145 * without actually waiting for the IRQ to be handled.
2146 */
2147 if (done)
2148 init_completion(&msm_host->pwr_irq_completion);
2149 else
2150 wait_for_completion(&msm_host->pwr_irq_completion);
2151
2152 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2153 __func__, req_type);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302154}
2155
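/* Enable or disable the CDR (clock data recovery) circuit via DLL_CONFIG */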
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002156static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2157{
2158 if (enable)
2159 writel_relaxed((readl_relaxed(host->ioaddr +
2160 CORE_DLL_CONFIG) | CORE_CDR_EN),
2161 host->ioaddr + CORE_DLL_CONFIG);
2162 else
2163 writel_relaxed((readl_relaxed(host->ioaddr +
2164 CORE_DLL_CONFIG) & ~CORE_CDR_EN),
2165 host->ioaddr + CORE_DLL_CONFIG);
2166}
2167
Asutosh Das3781bd82013-01-10 21:11:04 +05302168static unsigned int sdhci_msm_max_segs(void)
2169{
2170 return SDHCI_MSM_MAX_SEGMENTS;
2171}
2172
Sahitya Tummala00240122013-02-28 19:50:51 +05302173static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302174{
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302175 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2176 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302177
Sahitya Tummala00240122013-02-28 19:50:51 +05302178 return msm_host->pdata->sup_clk_table[0];
2179}
2180
2181static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2182{
2183 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2184 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2185 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2186
2187 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2188}
2189
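/*
 * Return the highest rate in the supported clock table that does not exceed
 * req_clk, or the minimum supported rate if req_clk is below it.
 */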
2190static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2191 u32 req_clk)
2192{
2193 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2194 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2195 unsigned int sel_clk = -1;
2196 unsigned char cnt;
2197
2198 if (req_clk < sdhci_msm_get_min_clock(host)) {
2199 sel_clk = sdhci_msm_get_min_clock(host);
2200 return sel_clk;
2201 }
2202
2203 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2204 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2205 break;
2206 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2207 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2208 break;
2209 } else {
2210 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2211 }
2212 }
2213 return sel_clk;
2214}
2215
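/*
 * Enable or disable all SDHC clocks (bus voter, iface, core, CDC calibration
 * and sleep clocks) along with the corresponding bus bandwidth vote, and keep
 * clks_on in sync with the resulting state.
 */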
2216static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2217{
2218 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2219 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2220 int rc = 0;
2221
2222 if (enable && !atomic_read(&msm_host->clks_on)) {
2223 pr_debug("%s: request to enable clocks\n",
2224 mmc_hostname(host->mmc));
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302225
2226 sdhci_msm_bus_voting(host, 1);
2227
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302228 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2229 rc = clk_prepare_enable(msm_host->bus_clk);
2230 if (rc) {
2231 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2232 mmc_hostname(host->mmc), __func__, rc);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302233 goto remove_vote;
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302234 }
2235 }
2236 if (!IS_ERR(msm_host->pclk)) {
2237 rc = clk_prepare_enable(msm_host->pclk);
2238 if (rc) {
2239 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2240 mmc_hostname(host->mmc), __func__, rc);
2241 goto disable_bus_clk;
2242 }
2243 }
2244 rc = clk_prepare_enable(msm_host->clk);
2245 if (rc) {
2246 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2247 mmc_hostname(host->mmc), __func__, rc);
2248 goto disable_pclk;
2249 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002250 if (!IS_ERR(msm_host->ff_clk)) {
2251 rc = clk_prepare_enable(msm_host->ff_clk);
2252 if (rc) {
2253 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2254 mmc_hostname(host->mmc), __func__, rc);
2255 goto disable_clk;
2256 }
2257 }
2258 if (!IS_ERR(msm_host->sleep_clk)) {
2259 rc = clk_prepare_enable(msm_host->sleep_clk);
2260 if (rc) {
2261 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2262 mmc_hostname(host->mmc), __func__, rc);
2263 goto disable_ff_clk;
2264 }
2265 }
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302266 mb();
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302267
Sahitya Tummala00240122013-02-28 19:50:51 +05302268 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302269 pr_debug("%s: request to disable clocks\n",
2270 mmc_hostname(host->mmc));
2271 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2272 mb();
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002273 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2274 clk_disable_unprepare(msm_host->sleep_clk);
2275 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2276 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302277 clk_disable_unprepare(msm_host->clk);
2278 if (!IS_ERR(msm_host->pclk))
2279 clk_disable_unprepare(msm_host->pclk);
2280 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2281 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302282
2283 sdhci_msm_bus_voting(host, 0);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302284 }
Sahitya Tummala00240122013-02-28 19:50:51 +05302285 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302286 goto out;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002287disable_ff_clk:
2288 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2289 clk_disable_unprepare(msm_host->ff_clk);
2290disable_clk:
2291 if (!IS_ERR_OR_NULL(msm_host->clk))
2292 clk_disable_unprepare(msm_host->clk);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302293disable_pclk:
2294 if (!IS_ERR_OR_NULL(msm_host->pclk))
2295 clk_disable_unprepare(msm_host->pclk);
2296disable_bus_clk:
2297 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2298 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302299remove_vote:
2300 if (msm_host->msm_bus_vote.client_handle)
2301 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302302out:
Sahitya Tummala00240122013-02-28 19:50:51 +05302303 return rc;
2304}
2305
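/*
 * Set the controller clock: gate all clocks when the requested clock is 0,
 * toggle the PWRSAVE bit based on whether the card allows clock gating,
 * select the MCLK source and HC_SELECT_IN for HS400, and finally program the
 * closest supported rate (doubled internally for DDR50/HS400 modes).
 */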
2306static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
2307{
2308 int rc;
2309 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2310 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2311 struct mmc_ios curr_ios = host->mmc->ios;
2312 u32 sup_clock, ddr_clock;
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302313 bool curr_pwrsave;
Sahitya Tummala00240122013-02-28 19:50:51 +05302314
2315 if (!clock) {
2316 sdhci_msm_prepare_clocks(host, false);
2317 host->clock = clock;
2318 return;
2319 }
2320
2321 rc = sdhci_msm_prepare_clocks(host, true);
2322 if (rc)
2323 return;
2324
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302325 curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2326 CORE_CLK_PWRSAVE);
2327 if ((msm_host->clk_rate > 400000) &&
2328 !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
2329 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2330 | CORE_CLK_PWRSAVE,
2331 host->ioaddr + CORE_VENDOR_SPEC);
2332 /*
 2333	 * Disable pwrsave for a newly added card if it doesn't allow clock
2334 * gating.
2335 */
2336 else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
2337 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2338 & ~CORE_CLK_PWRSAVE,
2339 host->ioaddr + CORE_VENDOR_SPEC);
2340
Sahitya Tummala00240122013-02-28 19:50:51 +05302341 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002342 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
2343 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala00240122013-02-28 19:50:51 +05302344 /*
 2345		 * The SDHC requires the internal clock frequency to be double the
 2346		 * actual clock that will be set for DDR mode. The controller
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002347		 * uses the faster clock (100/400MHz) for some of its parts and
 2348		 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala00240122013-02-28 19:50:51 +05302349 */
2350 ddr_clock = clock * 2;
2351 sup_clock = sdhci_msm_get_sup_clk_rate(host,
2352 ddr_clock);
2353 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002354
2355 /*
2356 * In general all timing modes are controlled via UHS mode select in
 2357	 * Host Control2 register. eMMC specific HS200/HS400 don't have
2358 * their respective modes defined here, hence we use these values.
2359 *
2360 * HS200 - SDR104 (Since they both are equivalent in functionality)
2361 * HS400 - This involves multiple configurations
2362 * Initially SDR104 - when tuning is required as HS200
2363 * Then when switching to DDR @ 400MHz (HS400) we use
2364 * the vendor specific HC_SELECT_IN to control the mode.
2365 *
2366 * In addition to controlling the modes we also need to select the
2367 * correct input clock for DLL depending on the mode.
2368 *
2369 * HS400 - divided clock (free running MCLK/2)
2370 * All other modes - default (free running MCLK)
2371 */
2372 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
2373 /* Select the divided clock (free running MCLK/2) */
2374 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2375 & ~CORE_HC_MCLK_SEL_MASK)
2376 | CORE_HC_MCLK_SEL_HS400),
2377 host->ioaddr + CORE_VENDOR_SPEC);
2378 /*
2379 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
2380 * register
2381 */
2382 if (msm_host->tuning_done && !msm_host->calibration_done) {
2383 /*
2384 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
2385 * field in VENDOR_SPEC_FUNC
2386 */
2387 writel_relaxed((readl_relaxed(host->ioaddr + \
2388 CORE_VENDOR_SPEC)
2389 | CORE_HC_SELECT_IN_HS400
2390 | CORE_HC_SELECT_IN_EN),
2391 host->ioaddr + CORE_VENDOR_SPEC);
2392 }
2393 } else {
2394 /* Select the default clock (free running MCLK) */
2395 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2396 & ~CORE_HC_MCLK_SEL_MASK)
2397 | CORE_HC_MCLK_SEL_DFLT),
2398 host->ioaddr + CORE_VENDOR_SPEC);
2399
2400 /*
2401 * Disable HC_SELECT_IN to be able to use the UHS mode select
2402 * configuration from Host Control2 register for all other
2403 * modes.
2404 *
2405 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
2406 * in VENDOR_SPEC_FUNC
2407 */
2408 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2409 & ~CORE_HC_SELECT_IN_EN
2410 & ~CORE_HC_SELECT_IN_MASK),
2411 host->ioaddr + CORE_VENDOR_SPEC);
2412 }
2413 mb();
2414
Sahitya Tummala00240122013-02-28 19:50:51 +05302415 if (sup_clock != msm_host->clk_rate) {
2416 pr_debug("%s: %s: setting clk rate to %u\n",
2417 mmc_hostname(host->mmc), __func__, sup_clock);
2418 rc = clk_set_rate(msm_host->clk, sup_clock);
2419 if (rc) {
2420 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
2421 mmc_hostname(host->mmc), __func__,
2422 sup_clock, rc);
2423 return;
2424 }
2425 msm_host->clk_rate = sup_clock;
2426 host->clock = clock;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302427 /*
2428 * Update the bus vote in case of frequency change due to
2429 * clock scaling.
2430 */
2431 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala00240122013-02-28 19:50:51 +05302432 }
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302433}
2434
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302435static int sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
2436 unsigned int uhs)
2437{
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002438 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2439 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302440 u16 ctrl_2;
2441
2442 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2443 /* Select Bus Speed Mode for host */
2444 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002445 if (uhs == MMC_TIMING_MMC_HS400)
2446 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2447 else if (uhs == MMC_TIMING_MMC_HS200)
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302448 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2449 else if (uhs == MMC_TIMING_UHS_SDR12)
2450 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2451 else if (uhs == MMC_TIMING_UHS_SDR25)
2452 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2453 else if (uhs == MMC_TIMING_UHS_SDR50)
2454 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2455 else if (uhs == MMC_TIMING_UHS_SDR104)
2456 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2457 else if (uhs == MMC_TIMING_UHS_DDR50)
2458 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala00240122013-02-28 19:50:51 +05302459 /*
 2460	 * When the clock frequency is less than 100MHz, the feedback clock must
 2461	 * be provided and the DLL must not be used so that tuning can be
 2462	 * skipped. To provide the feedback clock, the mode selection can be
 2463	 * any value less than 3'b011 in bits [2:0] of the HOST CONTROL2 register.
2464 */
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002465 if (host->clock <= CORE_FREQ_100MHZ) {
2466 if ((uhs == MMC_TIMING_MMC_HS400) ||
2467 (uhs == MMC_TIMING_MMC_HS200) ||
2468 (uhs == MMC_TIMING_UHS_SDR104))
2469 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala00240122013-02-28 19:50:51 +05302470
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002471 /*
2472 * Make sure DLL is disabled when not required
2473 *
2474 * Write 1 to DLL_RST bit of DLL_CONFIG register
2475 */
2476 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2477 | CORE_DLL_RST),
2478 host->ioaddr + CORE_DLL_CONFIG);
2479
2480 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
2481 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2482 | CORE_DLL_PDN),
2483 host->ioaddr + CORE_DLL_CONFIG);
2484 mb();
2485
2486 /*
2487 * The DLL needs to be restored and CDCLP533 recalibrated
2488 * when the clock frequency is set back to 400MHz.
2489 */
2490 msm_host->calibration_done = false;
2491 }
2492
2493 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
2494 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302495 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2496
2497 return 0;
2498}
2499
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002500/*
 2501 * sdhci_msm_disable_data_xfer - disable an ongoing AHB bus data transfer
 2502 *
 2503 * Write 0 to bit 0 in MCI_DATA_CTL (offset 0x2C) - clearing the TxActive bit
 2504 * by access to the legacy registers. This stops the current burst and
 2505 * prevents the start of the next one.
 2506 *
 2507 * Poll bits 13:12 of CORE_SDCC_DEBUG_REG (offset 0x124), with a timeout of
 2508 * CORE_AHB_DATA_DELAY_US, until they read 0 to validate that the AHB burst
 2509 * has completed and a new one has not started.
 2510 *
 2511 * Wait for 4us while the AHB finishes the descriptor fetch.
2512 */
2513static void sdhci_msm_disable_data_xfer(struct sdhci_host *host)
2514{
2515 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2516 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2517 u32 value;
2518 int ret;
Venkat Gopalakrishnan0a179c82013-06-26 17:56:11 -07002519 u32 version;
2520
2521 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2522 /* Core version 3.1.0 doesn't need this workaround */
2523 if (version == CORE_VERSION_310)
2524 return;
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002525
2526 value = readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CTRL);
2527 value &= ~(u32)CORE_MCI_DPSM_ENABLE;
2528 writel_relaxed(value, msm_host->core_mem + CORE_MCI_DATA_CTRL);
2529
2530 /* Enable the test bus for device slot */
2531 writel_relaxed(CORE_TESTBUS_ENA | CORE_TESTBUS_SEL2,
2532 msm_host->core_mem + CORE_TESTBUS_CONFIG);
2533
2534 ret = readl_poll_timeout_noirq(msm_host->core_mem
2535 + CORE_SDCC_DEBUG_REG, value,
2536 !(value & CORE_DEBUG_REG_AHB_HTRANS),
2537 CORE_AHB_DATA_DELAY_US, 1);
2538 if (ret) {
2539 pr_err("%s: %s: can't stop ongoing AHB bus access by ADMA\n",
2540 mmc_hostname(host->mmc), __func__);
2541 BUG();
2542 }
2543 /* Disable the test bus for device slot */
2544 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
2545 value &= ~CORE_TESTBUS_ENA;
2546 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
2547
2548 udelay(CORE_AHB_DESC_DELAY_US);
2549}
2550
Asutosh Das33a4ff52012-12-18 16:14:02 +05302551static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302552 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302553 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002554 .execute_tuning = sdhci_msm_execute_tuning,
2555 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das3781bd82013-01-10 21:11:04 +05302556 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302557 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala00240122013-02-28 19:50:51 +05302558 .get_min_clock = sdhci_msm_get_min_clock,
2559 .get_max_clock = sdhci_msm_get_max_clock,
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002560 .disable_data_xfer = sdhci_msm_disable_data_xfer,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302561};
2562
2563static int __devinit sdhci_msm_probe(struct platform_device *pdev)
2564{
2565 struct sdhci_host *host;
2566 struct sdhci_pltfm_host *pltfm_host;
2567 struct sdhci_msm_host *msm_host;
2568 struct resource *core_memres = NULL;
Asutosh Dasbbc84782013-02-11 15:31:35 +05302569 int ret = 0, dead = 0;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302570 u32 vdd_max_current;
Stephen Boyd3edbd8f2013-04-24 14:19:46 -07002571 u16 host_version;
Subhash Jadavanic08d2062013-05-14 17:46:43 +05302572 u32 pwr, irq_status, irq_ctl;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302573
2574 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
2575 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
2576 GFP_KERNEL);
2577 if (!msm_host) {
2578 ret = -ENOMEM;
2579 goto out;
2580 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302581
2582 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
2583 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata);
2584 if (IS_ERR(host)) {
2585 ret = PTR_ERR(host);
2586 goto out;
2587 }
2588
2589 pltfm_host = sdhci_priv(host);
2590 pltfm_host->priv = msm_host;
2591 msm_host->mmc = host->mmc;
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302592 msm_host->pdev = pdev;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302593
2594 /* Extract platform data */
2595 if (pdev->dev.of_node) {
Venkat Gopalakrishnanc61ab7e2013-03-11 12:17:57 -07002596 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
2597 if (ret < 0) {
2598 dev_err(&pdev->dev, "Failed to get slot index %d\n",
2599 ret);
2600 goto pltfm_free;
2601 }
2602 if (disable_slots & (1 << (ret - 1))) {
2603 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
2604 ret);
2605 ret = -ENODEV;
2606 goto pltfm_free;
2607 }
2608
Asutosh Das33a4ff52012-12-18 16:14:02 +05302609 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev);
2610 if (!msm_host->pdata) {
2611 dev_err(&pdev->dev, "DT parsing error\n");
2612 goto pltfm_free;
2613 }
2614 } else {
2615 dev_err(&pdev->dev, "No device tree node\n");
2616 goto pltfm_free;
2617 }
2618
2619 /* Setup Clocks */
2620
2621 /* Setup SDCC bus voter clock. */
2622 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
2623 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2624 /* Vote for max. clk rate for max. performance */
2625 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
2626 if (ret)
2627 goto pltfm_free;
2628 ret = clk_prepare_enable(msm_host->bus_clk);
2629 if (ret)
2630 goto pltfm_free;
2631 }
2632
2633 /* Setup main peripheral bus clock */
2634 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
2635 if (!IS_ERR(msm_host->pclk)) {
2636 ret = clk_prepare_enable(msm_host->pclk);
2637 if (ret)
2638 goto bus_clk_disable;
2639 }
2640
2641 /* Setup SDC MMC clock */
2642 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
2643 if (IS_ERR(msm_host->clk)) {
2644 ret = PTR_ERR(msm_host->clk);
2645 goto pclk_disable;
2646 }
2647
Sahitya Tummala00240122013-02-28 19:50:51 +05302648 /* Set to the minimum supported clock frequency */
2649 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
2650 if (ret) {
2651 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummalac954ab02013-06-07 13:03:07 +05302652 goto pclk_disable;
Sahitya Tummala00240122013-02-28 19:50:51 +05302653 }
Sahitya Tummalac954ab02013-06-07 13:03:07 +05302654 ret = clk_prepare_enable(msm_host->clk);
2655 if (ret)
2656 goto pclk_disable;
2657
Sahitya Tummala00240122013-02-28 19:50:51 +05302658 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302659 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala00240122013-02-28 19:50:51 +05302660
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002661 /* Setup CDC calibration fixed feedback clock */
2662 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
2663 if (!IS_ERR(msm_host->ff_clk)) {
2664 ret = clk_prepare_enable(msm_host->ff_clk);
2665 if (ret)
2666 goto clk_disable;
2667 }
2668
2669 /* Setup CDC calibration sleep clock */
2670 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
2671 if (!IS_ERR(msm_host->sleep_clk)) {
2672 ret = clk_prepare_enable(msm_host->sleep_clk);
2673 if (ret)
2674 goto ff_clk_disable;
2675 }
2676
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -07002677 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
2678
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302679 ret = sdhci_msm_bus_register(msm_host, pdev);
2680 if (ret)
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002681 goto sleep_clk_disable;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302682
2683 if (msm_host->msm_bus_vote.client_handle)
2684 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
2685 sdhci_msm_bus_work);
2686 sdhci_msm_bus_voting(host, 1);
2687
Asutosh Das33a4ff52012-12-18 16:14:02 +05302688 /* Setup regulators */
2689 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
2690 if (ret) {
2691 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302692 goto bus_unregister;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302693 }
2694
2695 /* Reset the core and Enable SDHC mode */
2696 core_memres = platform_get_resource_byname(pdev,
2697 IORESOURCE_MEM, "core_mem");
2698 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
2699 resource_size(core_memres));
2700
2701 if (!msm_host->core_mem) {
2702 dev_err(&pdev->dev, "Failed to remap registers\n");
2703 ret = -ENOMEM;
2704 goto vreg_deinit;
2705 }
2706
Stepan Moskovchenkoe0938982013-09-13 22:19:33 -07002707 /* Unset HC_MODE_EN bit in HC_MODE register */
2708 writel_relaxed(0, (msm_host->core_mem + CORE_HC_MODE));
2709
Asutosh Das33a4ff52012-12-18 16:14:02 +05302710 /* Set SW_RST bit in POWER register (Offset 0x0) */
Sahitya Tummalad5d76e72013-04-25 11:50:56 +05302711 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
2712 CORE_SW_RST, msm_host->core_mem + CORE_POWER);
2713 /*
 2714	 * SW reset can take up to 10 HCLK + 15 MCLK cycles.
2715 * Calculating based on min clk rates (hclk = 27MHz,
2716 * mclk = 400KHz) it comes to ~40us. Let's poll for
2717 * max. 1ms for reset completion.
2718 */
2719 ret = readl_poll_timeout(msm_host->core_mem + CORE_POWER,
2720 pwr, !(pwr & CORE_SW_RST), 100, 10);
2721
2722 if (ret) {
2723 dev_err(&pdev->dev, "reset failed (%d)\n", ret);
2724 goto vreg_deinit;
2725 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302726 /* Set HC_MODE_EN bit in HC_MODE register */
2727 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
2728
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002729 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
2730 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
2731 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
2732
Asutosh Das33a4ff52012-12-18 16:14:02 +05302733 /*
Subhash Jadavanic08d2062013-05-14 17:46:43 +05302734 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
2735 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
2736 * interrupt in GIC (by registering the interrupt handler), we need to
 2737	 * ensure that any pending power irq interrupt status is acknowledged,
 2738	 * otherwise the power irq interrupt handler would fire prematurely.
2739 */
2740 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
2741 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2742 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
2743 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
2744 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
2745 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
2746 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
2747 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
2748 /*
 2749	 * Ensure that the above writes are propagated before interrupt enablement
2750 * in GIC.
2751 */
2752 mb();
2753
2754 /*
Asutosh Das33a4ff52012-12-18 16:14:02 +05302755 * Following are the deviations from SDHC spec v3.0 -
2756 * 1. Card detection is handled using separate GPIO.
2757 * 2. Bus power control is handled by interacting with PMIC.
2758 */
2759 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
2760 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala00240122013-02-28 19:50:51 +05302761 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
2762 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummalad6a74b02013-02-25 15:50:08 +05302763 host->quirks2 |= SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING;
Krishna Kondaa20d3362013-04-01 21:01:59 -07002764 host->quirks2 |= SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE;
Sahitya Tummalad2ae8832013-04-12 11:49:11 +05302765 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummalae6886bd2013-04-12 12:11:20 +05302766 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala4d12d0b2013-04-12 11:59:25 +05302767 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302768
Sahitya Tummalaf667cc12013-06-10 16:32:51 +05302769 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
2770 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
2771
Stephen Boyd3edbd8f2013-04-24 14:19:46 -07002772 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnane9beaa22012-09-17 16:00:15 -07002773 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
2774 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
2775 SDHCI_VENDOR_VER_SHIFT));
2776 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
2777 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
2778 /*
2779 * Add 40us delay in interrupt handler when
 2780		 * operating at the initialization frequency (400KHz).
2781 */
2782 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
2783 /*
2784 * Set Software Reset for DAT line in Software
2785 * Reset Register (Bit 2).
2786 */
2787 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
2788 }
2789
2790 /* Setup PWRCTL irq */
Asutosh Dasbbc84782013-02-11 15:31:35 +05302791 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
2792 if (msm_host->pwr_irq < 0) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05302793 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Asutosh Dasbbc84782013-02-11 15:31:35 +05302794 msm_host->pwr_irq);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302795 goto vreg_deinit;
2796 }
Asutosh Dasbbc84782013-02-11 15:31:35 +05302797 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302798 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002799 dev_name(&pdev->dev), host);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302800 if (ret) {
2801 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Asutosh Dasbbc84782013-02-11 15:31:35 +05302802 msm_host->pwr_irq, ret);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302803 goto vreg_deinit;
2804 }
2805
2806 /* Enable pwr irq interrupts */
2807 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
2808
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302809 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
2810 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
2811
Asutosh Das33a4ff52012-12-18 16:14:02 +05302812 /* Set host capabilities */
2813 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
2814 msm_host->mmc->caps |= msm_host->pdata->caps;
2815
2816 vdd_max_current = sdhci_msm_get_vreg_vdd_max_current(msm_host);
2817 if (vdd_max_current >= 800)
2818 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2819 else if (vdd_max_current >= 600)
2820 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2821 else if (vdd_max_current >= 400)
2822 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2823 else
2824 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2825
2826 if (vdd_max_current > 150)
2827 msm_host->mmc->caps |= MMC_CAP_SET_XPC_180 |
2828 MMC_CAP_SET_XPC_300|
2829 MMC_CAP_SET_XPC_330;
2830
2831 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Asutosh Dasbbc84782013-02-11 15:31:35 +05302832 msm_host->mmc->caps2 |= MMC_CAP2_CORE_RUNTIME_PM;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302833 msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR;
2834 msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
2835 msm_host->mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC |
2836 MMC_CAP2_DETECT_ON_ERR);
2837 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
2838 msm_host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302839 msm_host->mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
Sahitya Tummala00240122013-02-28 19:50:51 +05302840 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Konstantin Dorfmanfa436d52013-04-17 16:26:11 +03002841 msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
Subhash Jadavani61a52c92013-05-29 15:52:10 +05302842 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Asutosh Das4dc60412013-06-24 18:20:45 +05302843 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302844
2845 if (msm_host->pdata->nonremovable)
2846 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
2847
Sahitya Tummalab4e84042013-03-10 07:03:17 +05302848 host->cpu_dma_latency_us = msm_host->pdata->cpu_dma_latency_us;
2849
Sahitya Tummala179e7382013-03-20 19:24:01 +05302850 init_completion(&msm_host->pwr_irq_completion);
2851
Sahitya Tummala62448d92013-03-12 14:57:46 +05302852 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
2853 ret = mmc_cd_gpio_request(msm_host->mmc,
2854 msm_host->pdata->status_gpio);
2855 if (ret) {
2856 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
2857 __func__, ret);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302858 goto vreg_deinit;
Sahitya Tummala62448d92013-03-12 14:57:46 +05302859 }
2860 }
2861
Sahitya Tummala2fa7eb12013-03-20 19:34:59 +05302862 if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
2863 host->dma_mask = DMA_BIT_MASK(32);
2864 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
2865 } else {
2866 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
2867 }
2868
Asutosh Das33a4ff52012-12-18 16:14:02 +05302869 ret = sdhci_add_host(host);
2870 if (ret) {
2871 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302872 goto free_cd_gpio;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302873 }
2874
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302875 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
2876 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
2877 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
2878 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
2879 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
2880 ret = device_create_file(&pdev->dev,
2881 &msm_host->msm_bus_vote.max_bus_bw);
2882 if (ret)
2883 goto remove_host;
2884
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302885 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
2886 msm_host->polling.show = show_polling;
2887 msm_host->polling.store = store_polling;
2888 sysfs_attr_init(&msm_host->polling.attr);
2889 msm_host->polling.attr.name = "polling";
2890 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
2891 ret = device_create_file(&pdev->dev, &msm_host->polling);
2892 if (ret)
2893 goto remove_max_bus_bw_file;
2894 }
Asutosh Dasbbc84782013-02-11 15:31:35 +05302895 ret = pm_runtime_set_active(&pdev->dev);
2896 if (ret)
2897 pr_err("%s: %s: pm_runtime_set_active failed: err: %d\n",
2898 mmc_hostname(host->mmc), __func__, ret);
2899 else
2900 pm_runtime_enable(&pdev->dev);
2901
Asutosh Das33a4ff52012-12-18 16:14:02 +05302902 /* Successful initialization */
2903 goto out;
2904
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302905remove_max_bus_bw_file:
2906 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302907remove_host:
2908 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
2909 sdhci_remove_host(host, dead);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302910free_cd_gpio:
2911 if (gpio_is_valid(msm_host->pdata->status_gpio))
2912 mmc_cd_gpio_free(msm_host->mmc);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302913vreg_deinit:
2914 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302915bus_unregister:
2916 if (msm_host->msm_bus_vote.client_handle)
2917 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2918 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002919sleep_clk_disable:
2920 if (!IS_ERR(msm_host->sleep_clk))
2921 clk_disable_unprepare(msm_host->sleep_clk);
2922ff_clk_disable:
2923 if (!IS_ERR(msm_host->ff_clk))
2924 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302925clk_disable:
2926 if (!IS_ERR(msm_host->clk))
2927 clk_disable_unprepare(msm_host->clk);
2928pclk_disable:
2929 if (!IS_ERR(msm_host->pclk))
2930 clk_disable_unprepare(msm_host->pclk);
2931bus_clk_disable:
2932 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2933 clk_disable_unprepare(msm_host->bus_clk);
2934pltfm_free:
2935 sdhci_pltfm_free(pdev);
2936out:
2937 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
2938 return ret;
2939}
2940
2941static int __devexit sdhci_msm_remove(struct platform_device *pdev)
2942{
2943 struct sdhci_host *host = platform_get_drvdata(pdev);
2944 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2945 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2946 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
2947 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
2948 0xffffffff);
2949
2950 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302951 if (!gpio_is_valid(msm_host->pdata->status_gpio))
2952 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302953 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302954 sdhci_remove_host(host, dead);
Asutosh Dasbbc84782013-02-11 15:31:35 +05302955 pm_runtime_disable(&pdev->dev);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302956 sdhci_pltfm_free(pdev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302957
2958 if (gpio_is_valid(msm_host->pdata->status_gpio))
2959 mmc_cd_gpio_free(msm_host->mmc);
2960
Asutosh Das33a4ff52012-12-18 16:14:02 +05302961 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302962
Asutosh Das33a4ff52012-12-18 16:14:02 +05302963 if (pdata->pin_data)
Asutosh Das390519d2012-12-21 12:21:42 +05302964 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302965
2966 if (msm_host->msm_bus_vote.client_handle) {
2967 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2968 sdhci_msm_bus_unregister(msm_host);
2969 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302970 return 0;
2971}
2972
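/*
 * Runtime PM callbacks only mask the SDHCI and power irqs; the clocks are
 * handled separately via sdhci_msm_prepare_clocks(), so at most the pending
 * bus vote is dropped here when the clocks are already off.
 */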
Asutosh Dasbbc84782013-02-11 15:31:35 +05302973static int sdhci_msm_runtime_suspend(struct device *dev)
2974{
2975 struct sdhci_host *host = dev_get_drvdata(dev);
2976 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2977 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2978
2979 disable_irq(host->irq);
2980 disable_irq(msm_host->pwr_irq);
2981
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302982 /*
2983 * Remove the vote immediately only if clocks are off in which
2984 * case we might have queued work to remove vote but it may not
2985 * be completed before runtime suspend or system suspend.
2986 */
2987 if (!atomic_read(&msm_host->clks_on)) {
2988 if (msm_host->msm_bus_vote.client_handle)
2989 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2990 }
2991
Asutosh Dasbbc84782013-02-11 15:31:35 +05302992 return 0;
2993}
2994
2995static int sdhci_msm_runtime_resume(struct device *dev)
2996{
2997 struct sdhci_host *host = dev_get_drvdata(dev);
2998 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2999 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3000
3001 enable_irq(msm_host->pwr_irq);
3002 enable_irq(host->irq);
3003
3004 return 0;
3005}
3006
3007#ifdef CONFIG_PM_SLEEP
3008
3009static int sdhci_msm_suspend(struct device *dev)
3010{
3011 struct sdhci_host *host = dev_get_drvdata(dev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303012 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3013 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Dasbbc84782013-02-11 15:31:35 +05303014 int ret = 0;
3015
Sahitya Tummala62448d92013-03-12 14:57:46 +05303016 if (gpio_is_valid(msm_host->pdata->status_gpio))
3017 mmc_cd_gpio_free(msm_host->mmc);
3018
Asutosh Dasbbc84782013-02-11 15:31:35 +05303019 if (pm_runtime_suspended(dev)) {
3020 pr_debug("%s: %s: already runtime suspended\n",
3021 mmc_hostname(host->mmc), __func__);
3022 goto out;
3023 }
3024
3025 return sdhci_msm_runtime_suspend(dev);
3026out:
3027 return ret;
3028}
3029
3030static int sdhci_msm_resume(struct device *dev)
3031{
3032 struct sdhci_host *host = dev_get_drvdata(dev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303033 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3034 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Dasbbc84782013-02-11 15:31:35 +05303035 int ret = 0;
3036
Sahitya Tummala62448d92013-03-12 14:57:46 +05303037 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
3038 ret = mmc_cd_gpio_request(msm_host->mmc,
3039 msm_host->pdata->status_gpio);
3040 if (ret)
3041 pr_err("%s: %s: Failed to request card detection IRQ %d\n",
3042 mmc_hostname(host->mmc), __func__, ret);
3043 }
3044
Asutosh Dasbbc84782013-02-11 15:31:35 +05303045 if (pm_runtime_suspended(dev)) {
3046 pr_debug("%s: %s: runtime suspended, defer system resume\n",
3047 mmc_hostname(host->mmc), __func__);
3048 goto out;
3049 }
3050
3051 return sdhci_msm_runtime_resume(dev);
3052out:
3053 return ret;
3054}
3055#endif
3056
3057#ifdef CONFIG_PM
3058static const struct dev_pm_ops sdhci_msm_pmops = {
3059 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
3060 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
3061 NULL)
3062};
3063
3064#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
3065
3066#else
 3067#define SDHCI_MSM_PMOPS NULL
3068#endif
Asutosh Das33a4ff52012-12-18 16:14:02 +05303069static const struct of_device_id sdhci_msm_dt_match[] = {
 3070	{.compatible = "qcom,sdhci-msm"},
	{},
 3071};
3072MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
3073
3074static struct platform_driver sdhci_msm_driver = {
3075 .probe = sdhci_msm_probe,
3076 .remove = __devexit_p(sdhci_msm_remove),
3077 .driver = {
3078 .name = "sdhci_msm",
3079 .owner = THIS_MODULE,
3080 .of_match_table = sdhci_msm_dt_match,
Asutosh Dasbbc84782013-02-11 15:31:35 +05303081 .pm = SDHCI_MSM_PMOPS,
Asutosh Das33a4ff52012-12-18 16:14:02 +05303082 },
3083};
3084
3085module_platform_driver(sdhci_msm_driver);
3086
3087MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
3088MODULE_LICENSE("GPL v2");