/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm MSM SDHCI Platform
 * driver source file
 *
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mmc/mmc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/cd-gpio.h>
#include <linux/dma-mapping.h>
#include <mach/gpio.h>
#include <mach/msm_bus.h>
#include <linux/iopoll.h>

#include "sdhci-pltfm.h"

#define SDHCI_VER_100		0x2B
#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define FF_CLK_SW_RST_DIS	(1 << 13)

#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

#define CORE_PWRCTL_STATUS	0xDC
#define CORE_PWRCTL_MASK	0xE0
#define CORE_PWRCTL_CLEAR	0xE4
#define CORE_PWRCTL_CTL		0xE8

#define CORE_PWRCTL_BUS_OFF	0x01
#define CORE_PWRCTL_BUS_ON	(1 << 1)
#define CORE_PWRCTL_IO_LOW	(1 << 2)
#define CORE_PWRCTL_IO_HIGH	(1 << 3)

#define CORE_PWRCTL_BUS_SUCCESS	0x01
#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
#define CORE_PWRCTL_IO_FAIL	(1 << 3)

#define INT_MASK		0xF
#define MAX_PHASES		16

#define CORE_DLL_CONFIG		0x100
#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
#define CORE_DLL_EN		(1 << 16)
#define CORE_CDR_EN		(1 << 17)
#define CORE_CK_OUT_EN		(1 << 18)
#define CORE_CDR_EXT_EN		(1 << 19)
#define CORE_DLL_PDN		(1 << 29)
#define CORE_DLL_RST		(1 << 30)

#define CORE_DLL_STATUS		0x108
#define CORE_DLL_LOCK		(1 << 7)

#define CORE_VENDOR_SPEC	0x10C
#define CORE_CLK_PWRSAVE	(1 << 1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
#define CORE_HC_SELECT_IN_EN	(1 << 18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0	0x114
#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1	0x118

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
#define CORE_HW_AUTOCAL_ENA		(1 << 17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			(1 << 16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		(1 << 0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
#define CORE_CDC_SWITCH_RC_EN		(1 << 1)

#define CORE_DDR_200_CFG		0x184
#define CORE_CDC_T4_DLY_SEL		(1 << 0)
#define CORE_START_CDC_TRAFFIC		(1 << 6)

#define CORE_MCI_DATA_CTRL	0x2C
#define CORE_MCI_DPSM_ENABLE	(1 << 0)

#define CORE_TESTBUS_CONFIG	0x0CC
#define CORE_TESTBUS_ENA	(1 << 3)
#define CORE_TESTBUS_SEL2	(1 << 4)

#define CORE_MCI_VERSION	0x050
#define CORE_VERSION_310	0x10000011

/*
 * Wait for the end of any potential AHB access for data:
 * 16 AHB cycles (160ns at 100MHz, 320ns at 50MHz) +
 * delay on the AHB bus (2us) = 2.32us maximum.
 * A 10x margin is applied.
 */
#define CORE_AHB_DATA_DELAY_US	23
/*
 * Wait for the end of any potential AHB access for a descriptor:
 * Single (1 AHB cycle) + delay on the AHB bus = max 2us
 * INCR4 (4 AHB cycles) + delay on the AHB bus = max 2us
 * Single (1 AHB cycle) + delay on the AHB bus = max 2us
 * Total 8us delay with margin.
 */
#define CORE_AHB_DESC_DELAY_US	8

#define CORE_SDCC_DEBUG_REG		0x124
#define CORE_DEBUG_REG_AHB_HTRANS	(3 << 12)

/* 8KB descriptors */
#define SDHCI_MSM_MAX_SEGMENTS	(1 << 13)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */

#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define INVALID_TUNING_PHASE	-1

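/*
 * Tuning block patterns returned by the card in response to the tuning
 * command (CMD19 for SD, CMD21 for eMMC): the 64-byte pattern is used for
 * 4-bit data buses and the 128-byte pattern for 8-bit buses. Received data
 * is compared against these known values to decide whether a given DLL
 * phase samples the bus correctly.
 */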
static const u32 tuning_block_64[] = {
	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
};

static const u32 tuning_block_128[] = {
	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
};

static int disable_slots;
/* root can write, others read */
module_param(disable_slots, int, S_IRUGO|S_IWUSR);

/* This structure keeps information per regulator */
struct sdhci_msm_reg_data {
	/* voltage regulator handle */
	struct regulator *reg;
	/* regulator name */
	const char *name;
	/* voltage level to be set */
	u32 low_vol_level;
	u32 high_vol_level;
	/* Load values for low power and high power mode */
	u32 lpm_uA;
	u32 hpm_uA;

	/* is this regulator enabled? */
	bool is_enabled;
	/* does this regulator need to be always on? */
	bool is_always_on;
	/* is low power mode setting required for this regulator? */
	bool lpm_sup;
	bool set_voltage_sup;
};

/*
 * This structure keeps information for all the
 * regulators required for a SDCC slot.
 */
struct sdhci_msm_slot_reg_data {
	/* keeps VDD/VCC regulator info */
	struct sdhci_msm_reg_data *vdd_data;
	/* keeps VDD IO regulator info */
	struct sdhci_msm_reg_data *vdd_io_data;
};

struct sdhci_msm_gpio {
	u32 no;
	const char *name;
	bool is_enabled;
};

struct sdhci_msm_gpio_data {
	struct sdhci_msm_gpio *gpio;
	u8 size;
};

struct sdhci_msm_pad_pull {
	enum msm_tlmm_pull_tgt no;
	u32 val;
};

struct sdhci_msm_pad_pull_data {
	struct sdhci_msm_pad_pull *on;
	struct sdhci_msm_pad_pull *off;
	u8 size;
};

struct sdhci_msm_pad_drv {
	enum msm_tlmm_hdrive_tgt no;
	u32 val;
};

struct sdhci_msm_pad_drv_data {
	struct sdhci_msm_pad_drv *on;
	struct sdhci_msm_pad_drv *off;
	u8 size;
};

struct sdhci_msm_pad_data {
	struct sdhci_msm_pad_pull_data *pull;
	struct sdhci_msm_pad_drv_data *drv;
};


struct sdhci_msm_pin_data {
	/*
	 * = 1 if controller pins are using gpios
	 * = 0 if controller has dedicated MSM pads
	 */
	u8 is_gpio;
	bool cfg_sts;
	struct sdhci_msm_gpio_data *gpio_data;
	struct sdhci_msm_pad_data *pad_data;
};

struct sdhci_msm_bus_voting_data {
	struct msm_bus_scale_pdata *bus_pdata;
	unsigned int *bw_vecs;
	unsigned int bw_vecs_size;
};

struct sdhci_msm_pltfm_data {
	/* Supported UHS-I Modes */
	u32 caps;

	/* More capabilities */
	u32 caps2;

	unsigned long mmc_bus_width;
	struct sdhci_msm_slot_reg_data *vreg_data;
	bool nonremovable;
	struct sdhci_msm_pin_data *pin_data;
	u32 cpu_dma_latency_us;
	int status_gpio; /* card detection GPIO that is configured as IRQ */
	struct sdhci_msm_bus_voting_data *voting_data;
	u32 *sup_clk_table;
	unsigned char sup_clk_cnt;
};

struct sdhci_msm_bus_vote {
	uint32_t client_handle;
	uint32_t curr_vote;
	int min_bw_vote;
	int max_bw_vote;
	bool is_max_bw_needed;
	struct delayed_work vote_work;
	struct device_attribute max_bus_bw;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *clk;	/* main SD/MMC bus clock */
	struct clk *pclk;	/* SDHC peripheral bus clock */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *ff_clk;	/* CDC calibration fixed feedback clock */
	struct clk *sleep_clk;	/* CDC calibration sleep clock */
	atomic_t clks_on;	/* Set if clocks are enabled */
	struct sdhci_msm_pltfm_data *pdata;
	struct mmc_host *mmc;
	struct sdhci_pltfm_data sdhci_msm_pdata;
	u32 curr_pwr_state;
	u32 curr_io_level;
	struct completion pwr_irq_completion;
	struct sdhci_msm_bus_vote msm_bus_vote;
	struct device_attribute polling;
	u32 clk_rate; /* Keeps track of current clock rate that is set */
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	atomic_t controller_clock;
};

enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever is in voltage_level (the third argument) of
	 * the sdhci_msm_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};

/* MSM platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
						u8 poll)
{
	int rc = 0;
	u32 wait_cnt = 50;
	u8 ck_out_en = 0;
	struct mmc_host *mmc = host->mmc;

	/* poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
			CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), __func__, poll);
			rc = -ETIMEDOUT;
			goto out;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
	}
out:
	return rc;
}

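/*
 * Program one of the 16 DLL clock output phases into the CDR_SELEXT field
 * (bits [23:20]) of DLL_CONFIG. The grey_coded_phase_table below maps the
 * linear phase index (0..15) to the grey-coded value expected by the
 * hardware.
 */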
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc = 0;
	u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
					0x8};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(0xF << 20))
			| (grey_coded_phase_table[phase] << 20)),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	pr_err("%s: %s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), __func__, phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as the sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
				u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = {0};
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	if (!total_phases || (total_phases > MAX_PHASES)) {
		pr_err("%s: %s: invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), __func__, total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if the next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form a cycle then merge them as a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, then the
			 * total number of phases in both windows should not
			 * be more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = ((curr_max * 3) / 4);
	if (i)
		i--;

	ret = (int)ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		pr_err("%s: %s: invalid phase selected=%d\n",
			mmc_hostname(mmc), __func__, ret);
	}

	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return ret;
}

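/*
 * The MCLK_FREQ field (bits [26:24] of DLL_CONFIG) tells the DLL which
 * frequency band the controller clock falls in; the buckets below map
 * host->clock to codes 0-7 in roughly 12.5MHz steps up to 200MHz.
 */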
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~(7 << 24)) | (mclk_freq << 24)),
			host->ioaddr + CORE_DLL_CONFIG);
}

/* Initialize the DLL (programmable delay line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int rc = 0;
	unsigned long flags;
	u32 wait_cnt;
	bool prev_pwrsave, curr_pwrsave;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
	spin_lock_irqsave(&host->lock, flags);
	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
			CORE_CLK_PWRSAVE);
	curr_pwrsave = prev_pwrsave;
	/*
	 * Make sure that the clock is always enabled while DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock, so disable PWRSAVE here and
	 * re-enable it once tuning is completed.
	 */
	if (prev_pwrsave) {
		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
				& ~CORE_CLK_PWRSAVE),
				host->ioaddr + CORE_VENDOR_SPEC);
		curr_pwrsave = false;
	}

	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set DLL_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);

	/* Set CK_OUT_EN bit to 1. */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);

	wait_cnt = 50;
	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		CORE_DLL_LOCK)) {
		/* max. wait of 50us for the LOCK bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: DLL failed to LOCK\n",
				mmc_hostname(mmc), __func__);
			rc = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

out:
	/* Restore the correct PWRSAVE state */
	if (prev_pwrsave ^ curr_pwrsave) {
		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);

		if (prev_pwrsave)
			reg |= CORE_CLK_PWRSAVE;
		else
			reg &= ~CORE_CLK_PWRSAVE;

		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
	return rc;
}

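/*
 * CDCLP533 (calibrated delay circuit) calibration for HS400: reset the
 * DLL, re-apply the tuning phase saved by sdhci_msm_execute_tuning(),
 * program the CDC configuration registers and wait for the hardware
 * calibration to report completion.
 */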
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	u32 wait_cnt;
	int ret = 0;
	int cdc_err = 0;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in the delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
			| CORE_CMD_DAT_TRACK_SEL),
			host->ioaddr + CORE_DLL_CONFIG);

	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_CDC_T4_DLY_SEL),
			host->ioaddr + CORE_DDR_200_CFG);

	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			& ~CORE_CDC_SWITCH_BYPASS_OFF),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
			| CORE_CDC_SWITCH_RC_EN),
			host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			& ~CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			& ~CORE_SW_TRIG_FULL_CALIB),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
			| CORE_HW_AUTOCAL_ENA),
			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
	writel_relaxed((readl_relaxed(host->ioaddr +
			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	mb();

	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
	wait_cnt = 50;
	while (!(readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CALIBRATION_DONE)) {
		/* max. wait of 50us for CALIBRATION_DONE bit to be set */
		if (--wait_cnt == 0) {
			pr_err("%s: %s: CDC Calibration was not completed\n",
				mmc_hostname(host->mmc), __func__);
			ret = -ETIMEDOUT;
			goto out;
		}
		/* wait for 1us before polling again */
		udelay(1);
	}

	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (cdc_err) {
		pr_err("%s: %s: CDC Error Code %d\n",
			mmc_hostname(host->mmc), __func__, cdc_err);
		ret = -EINVAL;
		goto out;
	}

	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
			| CORE_START_CDC_TRAFFIC),
			host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
			__func__, ret);
	return ret;
}

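/*
 * Platform-specific tuning entry point: sweep all 16 DLL output phases,
 * issue the tuning command at each phase and record which phases return
 * the expected pattern, then program the phase chosen by
 * msm_find_most_appropriate_phase(). Once tuning is done, a later call
 * in HS400 mode runs the CDCLP533 calibration instead.
 */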
int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned long flags;
	int tuning_seq_cnt = 3;
	u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/*
	 * Tuning is required only for SDR104, HS200 and HS400 modes, and
	 * only if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
		!((ios.timing == MMC_TIMING_MMC_HS400) ||
		(ios.timing == MMC_TIMING_MMC_HS200) ||
		(ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);

	/* CDCLP533 HW calibration is only required for HS400 mode */
	if (msm_host->tuning_done && !msm_host->calibration_done &&
		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
		rc = sdhci_msm_cdclp533_calibration(host);
		spin_lock_irqsave(&host->lock, flags);
		if (!rc)
			msm_host->calibration_done = true;
		spin_unlock_irqrestore(&host->lock, flags);
		goto out;
	}

	spin_lock_irqsave(&host->lock, flags);

	if (((opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
		(opcode == MMC_SEND_TUNING_BLOCK_HS200)) &&
		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}
	spin_unlock_irqrestore(&host->lock, flags);

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf) {
		rc = -ENOMEM;
		goto out;
	}

retry:
	/* first of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto kfree;

	phase = 0;
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;

		/* set the phase in the delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (!cmd.error && !data.error &&
			!memcmp(data_buf, tuning_block_pattern, size)) {
			/* tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			pr_debug("%s: %s: found good phase = %d\n",
				mmc_hostname(mmc), __func__, phase);
		}
	} while (++phase < 16);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
							tuned_phase_cnt);
		if (rc < 0)
			goto kfree;
		else
			phase = (u8)rc;

		/*
		 * Finally set the selected phase in the delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto kfree;
		msm_host->saved_tuning_phase = phase;
		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
			mmc_hostname(mmc), __func__, phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* tuning failed */
		pr_err("%s: %s: no tuning point found\n",
			mmc_hostname(mmc), __func__);
		rc = -EIO;
	}

kfree:
	kfree(data_buf);
out:
	spin_lock_irqsave(&host->lock, flags);
	if (!rc)
		msm_host->tuning_done = true;
	spin_unlock_irqrestore(&host->lock, flags);
	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
	return rc;
}

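/* Request or free the slot's card-interface GPIOs according to 'enable'. */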
static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_gpio_data *curr;
	int i, ret = 0;

	curr = pdata->pin_data->gpio_data;
	for (i = 0; i < curr->size; i++) {
		if (!gpio_is_valid(curr->gpio[i].no)) {
			ret = -EINVAL;
			pr_err("%s: Invalid gpio = %d\n", __func__,
				curr->gpio[i].no);
			goto free_gpios;
		}
		if (enable) {
			ret = gpio_request(curr->gpio[i].no,
					curr->gpio[i].name);
			if (ret) {
				pr_err("%s: gpio_request(%d, %s) failed %d\n",
					__func__, curr->gpio[i].no,
					curr->gpio[i].name, ret);
				goto free_gpios;
			}
			curr->gpio[i].is_enabled = true;
		} else {
			gpio_free(curr->gpio[i].no);
			curr->gpio[i].is_enabled = false;
		}
	}
	return ret;

free_gpios:
	for (i--; i >= 0; i--) {
		gpio_free(curr->gpio[i].no);
		curr->gpio[i].is_enabled = false;
	}
	return ret;
}

static int sdhci_msm_setup_pad(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	struct sdhci_msm_pad_data *curr;
	int i;

	curr = pdata->pin_data->pad_data;
	for (i = 0; i < curr->drv->size; i++) {
		if (enable)
			msm_tlmm_set_hdrive(curr->drv->on[i].no,
				curr->drv->on[i].val);
		else
			msm_tlmm_set_hdrive(curr->drv->off[i].no,
				curr->drv->off[i].val);
	}

	for (i = 0; i < curr->pull->size; i++) {
		if (enable)
			msm_tlmm_set_pull(curr->pull->on[i].no,
				curr->pull->on[i].val);
		else
			msm_tlmm_set_pull(curr->pull->off[i].no,
				curr->pull->off[i].val);
	}

	return 0;
}

static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
	int ret = 0;

	if (!pdata->pin_data || (pdata->pin_data->cfg_sts == enable))
		return 0;
	if (pdata->pin_data->is_gpio)
		ret = sdhci_msm_setup_gpio(pdata, enable);
	else
		ret = sdhci_msm_setup_pad(pdata, enable);

	if (!ret)
		pdata->pin_data->cfg_sts = enable;

	return ret;
}

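/*
 * Read a device-tree u32 array property into a devm-allocated buffer.
 * On success *out points at the array and *len holds the element count;
 * a non-zero "size" argument sets an upper bound on the number of
 * elements accepted.
 */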
static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
				 u32 **out, int *len, u32 size)
{
	int ret = 0;
	struct device_node *np = dev->of_node;
	size_t sz;
	u32 *arr = NULL;

	if (!of_get_property(np, prop_name, len)) {
		ret = -EINVAL;
		goto out;
	}
	sz = *len = *len / sizeof(*arr);
	if (sz <= 0 || (size > 0 && (sz > size))) {
		dev_err(dev, "%s invalid size\n", prop_name);
		ret = -EINVAL;
		goto out;
	}

	arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
	if (!arr) {
		dev_err(dev, "%s failed allocating memory\n", prop_name);
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, prop_name, arr, sz);
	if (ret < 0) {
		dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
		goto out;
	}
	*out = arr;
out:
	if (ret)
		*len = 0;
	return ret;
}

#define MAX_PROP_SIZE 32
static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
		struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
{
	int len, ret = 0;
	const __be32 *prop;
	char prop_name[MAX_PROP_SIZE];
	struct sdhci_msm_reg_data *vreg;
	struct device_node *np = dev->of_node;

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
	if (!of_parse_phandle(np, prop_name, 0)) {
		dev_info(dev, "No vreg data found for %s\n", vreg_name);
		return ret;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg) {
		dev_err(dev, "No memory for vreg: %s\n", vreg_name);
		ret = -ENOMEM;
		return ret;
	}

	vreg->name = vreg_name;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-always-on", vreg_name);
	if (of_get_property(np, prop_name, NULL))
		vreg->is_always_on = true;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-lpm-sup", vreg_name);
	if (of_get_property(np, prop_name, NULL))
		vreg->lpm_sup = true;

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-voltage-level", vreg_name);
	prop = of_get_property(np, prop_name, &len);
	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_warn(dev, "%s %s property\n",
			prop ? "invalid format" : "no", prop_name);
	} else {
		vreg->low_vol_level = be32_to_cpup(&prop[0]);
		vreg->high_vol_level = be32_to_cpup(&prop[1]);
	}

	snprintf(prop_name, MAX_PROP_SIZE,
		"qcom,%s-current-level", vreg_name);
	prop = of_get_property(np, prop_name, &len);
	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_warn(dev, "%s %s property\n",
			prop ? "invalid format" : "no", prop_name);
	} else {
		vreg->lpm_uA = be32_to_cpup(&prop[0]);
		vreg->hpm_uA = be32_to_cpup(&prop[1]);
	}

	*vreg_data = vreg;
	dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
		vreg->name, vreg->is_always_on ? "always_on," : "",
		vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
		vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);

	return ret;
}

/* GPIO/Pad data extraction */
static int sdhci_msm_dt_get_pad_pull_info(struct device *dev, int id,
		struct sdhci_msm_pad_pull_data **pad_pull_data)
{
	int ret = 0, base = 0, len, i;
	u32 *tmp;
	struct sdhci_msm_pad_pull_data *pull_data;
	struct sdhci_msm_pad_pull *pull;

	switch (id) {
	case 1:
		base = TLMM_PULL_SDC1_CLK;
		break;
	case 2:
		base = TLMM_PULL_SDC2_CLK;
		break;
	case 3:
		base = TLMM_PULL_SDC3_CLK;
		break;
	case 4:
		base = TLMM_PULL_SDC4_CLK;
		break;
	default:
		dev_err(dev, "%s: Invalid slot id\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	pull_data = devm_kzalloc(dev, sizeof(struct sdhci_msm_pad_pull_data),
			GFP_KERNEL);
	if (!pull_data) {
		dev_err(dev, "No memory for msm_mmc_pad_pull_data\n");
		ret = -ENOMEM;
		goto out;
	}
	pull_data->size = 4; /* array size for clk, cmd, data and rclk */

	/* Allocate on, off configs for clk, cmd, data and rclk */
	pull = devm_kzalloc(dev, 2 * pull_data->size *\
			sizeof(struct sdhci_msm_pad_pull), GFP_KERNEL);
	if (!pull) {
		dev_err(dev, "No memory for msm_mmc_pad_pull\n");
		ret = -ENOMEM;
		goto out;
	}
	pull_data->on = pull;
	pull_data->off = pull + pull_data->size;

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-pull-on",
			&tmp, &len, pull_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		pull_data->on[i].no = base + i;
		pull_data->on[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
			i, pull_data->on[i].val);
	}

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-pull-off",
			&tmp, &len, pull_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		pull_data->off[i].no = base + i;
		pull_data->off[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
			i, pull_data->off[i].val);
	}

	*pad_pull_data = pull_data;
out:
	return ret;
}

static int sdhci_msm_dt_get_pad_drv_info(struct device *dev, int id,
		struct sdhci_msm_pad_drv_data **pad_drv_data)
{
	int ret = 0, base = 0, len, i;
	u32 *tmp;
	struct sdhci_msm_pad_drv_data *drv_data;
	struct sdhci_msm_pad_drv *drv;

	switch (id) {
	case 1:
		base = TLMM_HDRV_SDC1_CLK;
		break;
	case 2:
		base = TLMM_HDRV_SDC2_CLK;
		break;
	case 3:
		base = TLMM_HDRV_SDC3_CLK;
		break;
	case 4:
		base = TLMM_HDRV_SDC4_CLK;
		break;
	default:
		dev_err(dev, "%s: Invalid slot id\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	drv_data = devm_kzalloc(dev, sizeof(struct sdhci_msm_pad_drv_data),
			GFP_KERNEL);
	if (!drv_data) {
		dev_err(dev, "No memory for msm_mmc_pad_drv_data\n");
		ret = -ENOMEM;
		goto out;
	}
	drv_data->size = 3; /* array size for clk, cmd, data */

	/* Allocate on, off configs for clk, cmd, data */
	drv = devm_kzalloc(dev, 2 * drv_data->size *\
			sizeof(struct sdhci_msm_pad_drv), GFP_KERNEL);
	if (!drv) {
		dev_err(dev, "No memory msm_mmc_pad_drv\n");
		ret = -ENOMEM;
		goto out;
	}
	drv_data->on = drv;
	drv_data->off = drv + drv_data->size;

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-drv-on",
			&tmp, &len, drv_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		drv_data->on[i].no = base + i;
		drv_data->on[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
			i, drv_data->on[i].val);
	}

	ret = sdhci_msm_dt_get_array(dev, "qcom,pad-drv-off",
			&tmp, &len, drv_data->size);
	if (ret)
		goto out;

	for (i = 0; i < len; i++) {
		drv_data->off[i].no = base + i;
		drv_data->off[i].val = tmp[i];
		dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
			i, drv_data->off[i].val);
	}

	*pad_drv_data = drv_data;
out:
	return ret;
}

#define GPIO_NAME_MAX_LEN 32
static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
		struct sdhci_msm_pltfm_data *pdata)
{
	int ret = 0, id = 0, cnt, i;
	struct sdhci_msm_pin_data *pin_data;
	struct device_node *np = dev->of_node;

	pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
	if (!pin_data) {
		dev_err(dev, "No memory for pin_data\n");
		ret = -ENOMEM;
		goto out;
	}

	cnt = of_gpio_count(np);
	if (cnt > 0) {
		pin_data->is_gpio = true;
		pin_data->gpio_data = devm_kzalloc(dev,
				sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
		if (!pin_data->gpio_data) {
			dev_err(dev, "No memory for gpio_data\n");
			ret = -ENOMEM;
			goto out;
		}
		pin_data->gpio_data->size = cnt;
		pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
				sizeof(struct sdhci_msm_gpio), GFP_KERNEL);

		if (!pin_data->gpio_data->gpio) {
			dev_err(dev, "No memory for gpio\n");
			ret = -ENOMEM;
			goto out;
		}
		for (i = 0; i < cnt; i++) {
			const char *name = NULL;
			char result[GPIO_NAME_MAX_LEN];
			pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
			of_property_read_string_index(np,
					"qcom,gpio-names", i, &name);

			snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
					dev_name(dev), name ? name : "?");
			pin_data->gpio_data->gpio[i].name = result;
			dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
				pin_data->gpio_data->gpio[i].name,
				pin_data->gpio_data->gpio[i].no);
		}
	} else {
		pin_data->pad_data =
			devm_kzalloc(dev,
				     sizeof(struct sdhci_msm_pad_data),
				     GFP_KERNEL);
		if (!pin_data->pad_data) {
			dev_err(dev,
				"No memory for pin_data->pad_data\n");
			ret = -ENOMEM;
			goto out;
		}

		ret = of_alias_get_id(np, "sdhc");
		if (ret < 0) {
			dev_err(dev, "Failed to get slot index %d\n", ret);
			goto out;
		}
		id = ret;

		ret = sdhci_msm_dt_get_pad_pull_info(
				dev, id, &pin_data->pad_data->pull);
		if (ret)
			goto out;
		ret = sdhci_msm_dt_get_pad_drv_info(
				dev, id, &pin_data->pad_data->drv);
		if (ret)
			goto out;

	}
	pdata->pin_data = pin_data;
out:
	if (ret)
		dev_err(dev, "%s failed with err %d\n", __func__, ret);
	return ret;
}

/* Parse platform data */
static struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev)
{
	struct sdhci_msm_pltfm_data *pdata = NULL;
	struct device_node *np = dev->of_node;
	u32 bus_width = 0;
	u32 cpu_dma_latency;
	int len, i;
	int clk_table_len;
	u32 *clk_table = NULL;
	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "failed to allocate memory for platform data\n");
		goto out;
	}

	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
	if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;

	of_property_read_u32(np, "qcom,bus-width", &bus_width);
	if (bus_width == 8)
		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
	else if (bus_width == 4)
		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
	else {
		dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
		pdata->mmc_bus_width = 0;
	}

	if (!of_property_read_u32(np, "qcom,cpu-dma-latency-us",
				&cpu_dma_latency))
		pdata->cpu_dma_latency_us = cpu_dma_latency;

	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
			&clk_table, &clk_table_len, 0)) {
		dev_err(dev, "failed parsing supported clock rates\n");
		goto out;
	}
	if (!clk_table || !clk_table_len) {
		dev_err(dev, "Invalid clock table\n");
		goto out;
	}
	pdata->sup_clk_table = clk_table;
	pdata->sup_clk_cnt = clk_table_len;

	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
						    sdhci_msm_slot_reg_data),
					GFP_KERNEL);
	if (!pdata->vreg_data) {
		dev_err(dev, "failed to allocate memory for vreg data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
					 "vdd")) {
		dev_err(dev, "failed parsing vdd data\n");
		goto out;
	}
	if (sdhci_msm_dt_parse_vreg_info(dev,
					 &pdata->vreg_data->vdd_io_data,
					 "vdd-io")) {
		dev_err(dev, "failed parsing vdd-io data\n");
		goto out;
	}

	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
		dev_err(dev, "failed parsing gpio data\n");
		goto out;
	}

	len = of_property_count_strings(np, "qcom,bus-speed-mode");

	for (i = 0; i < len; i++) {
		const char *name = NULL;

		of_property_read_string_index(np,
			"qcom,bus-speed-mode", i, &name);
		if (!name)
			continue;

		if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
		else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
			pdata->caps |= MMC_CAP_1_8V_DDR
					| MMC_CAP_UHS_DDR50;
		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
			pdata->caps |= MMC_CAP_1_2V_DDR
					| MMC_CAP_UHS_DDR50;
	}

	if (of_get_property(np, "qcom,nonremovable", NULL))
		pdata->nonremovable = true;

	return pdata;
out:
	return NULL;
}

/* Returns the required bandwidth in bytes per second */
static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
					struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	unsigned int bw;

	bw = msm_host->clk_rate;
	/*
	 * For DDR mode, the SDCC controller clock runs at double
	 * the rate of the actual clock that goes to the card.
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		bw /= 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_1)
		bw /= 8;

	return bw;
}

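/*
 * Map a bandwidth requirement (bytes/sec) to a bus-scaling vote index:
 * returns the index of the first bw_vecs[] entry that is >= bw, clamped
 * to the last entry, or the cached maximum vote when the max-bandwidth
 * override is set and bw is non-zero.
 */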
static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
					unsigned int bw)
{
	unsigned int *table = host->pdata->voting_data->bw_vecs;
	unsigned int size = host->pdata->voting_data->bw_vecs_size;
	int i;

	if (host->msm_bus_vote.is_max_bw_needed && bw)
		return host->msm_bus_vote.max_bw_vote;

	for (i = 0; i < size; i++) {
		if (bw <= table[i])
			break;
	}

	if (i && (i == size))
		i--;

	return i;
}

/*
 * This function must be called with the host lock held.
 * The caller should also ensure that the msm bus client
 * handle is not null.
 */
static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
					int vote,
					unsigned long flags)
{
	struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
	int rc = 0;

	if (vote != msm_host->msm_bus_vote.curr_vote) {
		spin_unlock_irqrestore(&host->lock, flags);
		rc = msm_bus_scale_client_update_request(
				msm_host->msm_bus_vote.client_handle, vote);
		spin_lock_irqsave(&host->lock, flags);
		if (rc) {
			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				mmc_hostname(host->mmc),
				msm_host->msm_bus_vote.client_handle, vote, rc);
			goto out;
		}
		msm_host->msm_bus_vote.curr_vote = vote;
	}
out:
	return rc;
}

/*
 * Internal work. Work to set 0 bandwidth for msm bus.
 */
static void sdhci_msm_bus_work(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host;
	struct sdhci_host *host;
	unsigned long flags;

	msm_host = container_of(work, struct sdhci_msm_host,
				msm_bus_vote.vote_work.work);
	host = platform_get_drvdata(msm_host->pdev);

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	spin_lock_irqsave(&host->lock, flags);
	/* don't vote for 0 bandwidth if any request is in progress */
	if (!host->mrq) {
		sdhci_msm_bus_set_vote(msm_host,
			msm_host->msm_bus_vote.min_bw_vote, flags);
	} else
		pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
			   mmc_hostname(host->mmc), __func__);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on the bw (bandwidth) argument.
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
{
	int vote;
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
	spin_lock_irqsave(&host->lock, flags);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	sdhci_msm_bus_set_vote(msm_host, vote, flags);
	spin_unlock_irqrestore(&host->lock, flags);
}

#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */

/* This function queues a work which will set the bandwidth requirement to 0 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	unsigned long flags;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	spin_lock_irqsave(&host->lock, flags);
	if (msm_host->msm_bus_vote.min_bw_vote !=
		msm_host->msm_bus_vote.curr_vote)
		queue_delayed_work(system_nrt_wq,
				   &msm_host->msm_bus_vote.vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
				struct platform_device *pdev)
{
	int rc = 0;
	struct msm_bus_scale_pdata *bus_pdata;

	struct sdhci_msm_bus_voting_data *data;
	struct device *dev = &pdev->dev;

	data = devm_kzalloc(dev,
		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev,
			"%s: failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (data->bus_pdata) {
		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
				&data->bw_vecs, &data->bw_vecs_size, 0);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: Failed to get bus-bw-vectors-bps\n",
				__func__);
			goto out;
		}
		host->pdata->voting_data = data;
	}
	if (host->pdata->voting_data &&
		host->pdata->voting_data->bus_pdata &&
		host->pdata->voting_data->bw_vecs &&
		host->pdata->voting_data->bw_vecs_size) {

		bus_pdata = host->pdata->voting_data->bus_pdata;
		host->msm_bus_vote.client_handle =
				msm_bus_scale_register_client(bus_pdata);
		if (!host->msm_bus_vote.client_handle) {
			dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
			rc = -EFAULT;
			goto out;
		}
		/* cache the vote index for minimum and maximum bandwidth */
		host->msm_bus_vote.min_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, 0);
		host->msm_bus_vote.max_bw_vote =
				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
	} else {
		devm_kfree(dev, data);
	}

out:
	return rc;
}

static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
{
	if (host->msm_bus_vote.client_handle)
		msm_bus_scale_unregister_client(
			host->msm_bus_vote.client_handle);
}

static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned int bw;

	if (!msm_host->msm_bus_vote.client_handle)
		return;

	bw = sdhci_get_bw_required(host, ios);
	if (enable) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
	} else {
		/*
		 * If clock gating is enabled, then remove the vote
		 * immediately because clocks will be disabled only
		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
		 * additional delay is required to remove the bus vote.
		 */
		if (host->mmc->clkgate_delay)
			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		else
			sdhci_msm_bus_queue_work(host);
	}
}

Asutosh Das33a4ff52012-12-18 16:14:02 +05301666/* Regulator utility functions */
1667static int sdhci_msm_vreg_init_reg(struct device *dev,
1668 struct sdhci_msm_reg_data *vreg)
1669{
1670 int ret = 0;
1671
1672	/* Check if the regulator is already initialized */
1673 if (vreg->reg)
1674 goto out;
1675
1676 /* Get the regulator handle */
1677 vreg->reg = devm_regulator_get(dev, vreg->name);
1678 if (IS_ERR(vreg->reg)) {
1679 ret = PTR_ERR(vreg->reg);
1680 pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
1681 __func__, vreg->name, ret);
1682 goto out;
1683 }
1684
Asutosh Das95afcad2013-06-28 15:03:44 +05301685 if (regulator_count_voltages(vreg->reg) > 0) {
1686 vreg->set_voltage_sup = true;
1687 /* sanity check */
1688 if (!vreg->high_vol_level || !vreg->hpm_uA) {
1689 pr_err("%s: %s invalid constraints specified\n",
1690 __func__, vreg->name);
1691 ret = -EINVAL;
1692 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301693 }
1694
1695out:
1696 return ret;
1697}
1698
1699static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
1700{
1701 if (vreg->reg)
1702 devm_regulator_put(vreg->reg);
1703}
1704
1705static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
1706 *vreg, int uA_load)
1707{
1708 int ret = 0;
1709
1710 /*
1711 * regulators that do not support regulator_set_voltage also
1712 * do not support regulator_set_optimum_mode
1713 */
Asutosh Das95afcad2013-06-28 15:03:44 +05301714 if (vreg->set_voltage_sup) {
1715 ret = regulator_set_optimum_mode(vreg->reg, uA_load);
1716 if (ret < 0)
1717 pr_err("%s: regulator_set_optimum_mode(reg=%s,uA_load=%d) failed. ret=%d\n",
Asutosh Das33a4ff52012-12-18 16:14:02 +05301718 __func__, vreg->name, uA_load, ret);
1719 else
1720 /*
1721			 * regulator_set_optimum_mode() can return a non-zero
1722			 * value even in the success case.
1723 */
1724 ret = 0;
Asutosh Das95afcad2013-06-28 15:03:44 +05301725 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301726 return ret;
1727}
1728
1729static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
1730 int min_uV, int max_uV)
1731{
1732 int ret = 0;
Asutosh Das95afcad2013-06-28 15:03:44 +05301733 if (vreg->set_voltage_sup) {
1734 ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
1735 if (ret) {
1736			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d,max_uV=%d,ret=%d\n",
Asutosh Das33a4ff52012-12-18 16:14:02 +05301737 __func__, vreg->name, min_uV, max_uV, ret);
1738 }
Asutosh Das95afcad2013-06-28 15:03:44 +05301739 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301740
1741 return ret;
1742}
1743
1744static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
1745{
1746 int ret = 0;
1747
1748 /* Put regulator in HPM (high power mode) */
1749 ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
1750 if (ret < 0)
1751 return ret;
1752
1753 if (!vreg->is_enabled) {
1754 /* Set voltage level */
1755 ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
1756 vreg->high_vol_level);
1757 if (ret)
1758 return ret;
1759 }
1760 ret = regulator_enable(vreg->reg);
1761 if (ret) {
1762 pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
1763 __func__, vreg->name, ret);
1764 return ret;
1765 }
1766 vreg->is_enabled = true;
1767 return ret;
1768}
1769
1770static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
1771{
1772 int ret = 0;
1773
1774 /* Never disable regulator marked as always_on */
1775 if (vreg->is_enabled && !vreg->is_always_on) {
1776 ret = regulator_disable(vreg->reg);
1777 if (ret) {
1778 pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
1779 __func__, vreg->name, ret);
1780 goto out;
1781 }
1782 vreg->is_enabled = false;
1783
1784 ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
1785 if (ret < 0)
1786 goto out;
1787
1788 /* Set min. voltage level to 0 */
1789 ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
1790 if (ret)
1791 goto out;
1792 } else if (vreg->is_enabled && vreg->is_always_on) {
1793 if (vreg->lpm_sup) {
1794 /* Put always_on regulator in LPM (low power mode) */
1795 ret = sdhci_msm_vreg_set_optimum_mode(vreg,
1796 vreg->lpm_uA);
1797 if (ret < 0)
1798 goto out;
1799 }
1800 }
1801out:
1802 return ret;
1803}
1804
1805static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
1806 bool enable, bool is_init)
1807{
1808 int ret = 0, i;
1809 struct sdhci_msm_slot_reg_data *curr_slot;
1810 struct sdhci_msm_reg_data *vreg_table[2];
1811
1812 curr_slot = pdata->vreg_data;
1813 if (!curr_slot) {
1814		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
1815 __func__);
1816 goto out;
1817 }
1818
1819 vreg_table[0] = curr_slot->vdd_data;
1820 vreg_table[1] = curr_slot->vdd_io_data;
1821
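	/* Enable or disable the VDD and VDD-IO supplies for this slot */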
1822 for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
1823 if (vreg_table[i]) {
1824 if (enable)
1825 ret = sdhci_msm_vreg_enable(vreg_table[i]);
1826 else
1827 ret = sdhci_msm_vreg_disable(vreg_table[i]);
1828 if (ret)
1829 goto out;
1830 }
1831 }
1832out:
1833 return ret;
1834}
1835
1836/*
1837 * Reset vreg by ensuring it is off during probe. A call to enable
1838 * vreg is needed so that the subsequent disable call is balanced.
1839 */
1840static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
1841{
1842 int ret;
1843
1844 ret = sdhci_msm_setup_vreg(pdata, 1, true);
1845 if (ret)
1846 return ret;
1847 ret = sdhci_msm_setup_vreg(pdata, 0, true);
1848 return ret;
1849}
1850
1851/* This init function should be called only once for each SDHC slot */
1852static int sdhci_msm_vreg_init(struct device *dev,
1853 struct sdhci_msm_pltfm_data *pdata,
1854 bool is_init)
1855{
1856 int ret = 0;
1857 struct sdhci_msm_slot_reg_data *curr_slot;
1858 struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
1859
1860 curr_slot = pdata->vreg_data;
1861 if (!curr_slot)
1862 goto out;
1863
1864 curr_vdd_reg = curr_slot->vdd_data;
1865 curr_vdd_io_reg = curr_slot->vdd_io_data;
1866
1867 if (!is_init)
1868 /* Deregister all regulators from regulator framework */
1869 goto vdd_io_reg_deinit;
1870
1871 /*
1872 * Get the regulator handle from voltage regulator framework
1873 * and then try to set the voltage level for the regulator
1874 */
1875 if (curr_vdd_reg) {
1876 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
1877 if (ret)
1878 goto out;
1879 }
1880 if (curr_vdd_io_reg) {
1881 ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
1882 if (ret)
1883 goto vdd_reg_deinit;
1884 }
1885 ret = sdhci_msm_vreg_reset(pdata);
1886 if (ret)
1887 dev_err(dev, "vreg reset failed (%d)\n", ret);
1888 goto out;
1889
1890vdd_io_reg_deinit:
1891 if (curr_vdd_io_reg)
1892 sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
1893vdd_reg_deinit:
1894 if (curr_vdd_reg)
1895 sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
1896out:
1897 return ret;
1898}
1899
1900
1901static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
1902 enum vdd_io_level level,
1903 unsigned int voltage_level)
1904{
1905 int ret = 0;
1906 int set_level;
1907 struct sdhci_msm_reg_data *vdd_io_reg;
1908
1909 if (!pdata->vreg_data)
1910 return ret;
1911
1912 vdd_io_reg = pdata->vreg_data->vdd_io_data;
1913 if (vdd_io_reg && vdd_io_reg->is_enabled) {
1914 switch (level) {
1915 case VDD_IO_LOW:
1916 set_level = vdd_io_reg->low_vol_level;
1917 break;
1918 case VDD_IO_HIGH:
1919 set_level = vdd_io_reg->high_vol_level;
1920 break;
1921 case VDD_IO_SET_LEVEL:
1922 set_level = voltage_level;
1923 break;
1924 default:
1925 pr_err("%s: invalid argument level = %d",
1926 __func__, level);
1927 ret = -EINVAL;
1928 return ret;
1929 }
1930 ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
1931 set_level);
1932 }
1933 return ret;
1934}
1935
1936static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1937{
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07001938 struct sdhci_host *host = (struct sdhci_host *)data;
1939 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1940 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301941 u8 irq_status = 0;
1942 u8 irq_ack = 0;
1943 int ret = 0;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301944 int pwr_state = 0, io_level = 0;
1945 unsigned long flags;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301946
1947 irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
1948 pr_debug("%s: Received IRQ(%d), status=0x%x\n",
1949 mmc_hostname(msm_host->mmc), irq, irq_status);
1950
1951 /* Clear the interrupt */
1952 writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
1953 /*
1954	 * SDHC has core_mem and hc_mem device memory regions and these
1955	 * addresses do not fall within a 1KB region. Hence, any update to
1956	 * the core_mem address space requires an mb() to ensure it completes
1957	 * before the next update to registers within hc_mem.
1958 */
1959 mb();
1960
1961 /* Handle BUS ON/OFF*/
1962 if (irq_status & CORE_PWRCTL_BUS_ON) {
1963 ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301964 if (!ret) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05301965 ret = sdhci_msm_setup_pins(msm_host->pdata, true);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301966 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
1967 VDD_IO_HIGH, 0);
1968 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301969 if (ret)
1970 irq_ack |= CORE_PWRCTL_BUS_FAIL;
1971 else
1972 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301973
1974 pwr_state = REQ_BUS_ON;
1975 io_level = REQ_IO_HIGH;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301976 }
1977 if (irq_status & CORE_PWRCTL_BUS_OFF) {
1978 ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301979 if (!ret) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05301980 ret = sdhci_msm_setup_pins(msm_host->pdata, false);
Sahitya Tummala179e7382013-03-20 19:24:01 +05301981 ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
1982 VDD_IO_LOW, 0);
1983 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05301984 if (ret)
1985 irq_ack |= CORE_PWRCTL_BUS_FAIL;
1986 else
1987 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05301988
1989 pwr_state = REQ_BUS_OFF;
1990 io_level = REQ_IO_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05301991 }
1992 /* Handle IO LOW/HIGH */
1993 if (irq_status & CORE_PWRCTL_IO_LOW) {
1994 /* Switch voltage Low */
1995 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
1996 if (ret)
1997 irq_ack |= CORE_PWRCTL_IO_FAIL;
1998 else
1999 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302000
2001 io_level = REQ_IO_LOW;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302002 }
2003 if (irq_status & CORE_PWRCTL_IO_HIGH) {
2004 /* Switch voltage High */
2005 ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
2006 if (ret)
2007 irq_ack |= CORE_PWRCTL_IO_FAIL;
2008 else
2009 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302010
2011 io_level = REQ_IO_HIGH;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302012 }
2013
2014 /* ACK status to the core */
2015 writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
2016 /*
2017	 * SDHC has core_mem and hc_mem device memory regions and these
2018	 * addresses do not fall within a 1KB region. Hence, any update to
2019	 * the core_mem address space requires an mb() to ensure it completes
2020	 * before the next update to registers within hc_mem.
2021 */
2022 mb();
2023
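	/* Update the IO pad power switch to match the requested IO voltage level */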
Sahitya Tummala179e7382013-03-20 19:24:01 +05302024 if (io_level & REQ_IO_HIGH)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002025 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2026 ~CORE_IO_PAD_PWR_SWITCH),
2027 host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala179e7382013-03-20 19:24:01 +05302028 else if (io_level & REQ_IO_LOW)
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002029 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
2030 CORE_IO_PAD_PWR_SWITCH),
2031 host->ioaddr + CORE_VENDOR_SPEC);
2032 mb();
2033
Asutosh Das33a4ff52012-12-18 16:14:02 +05302034 pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
2035 mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
Sahitya Tummala179e7382013-03-20 19:24:01 +05302036 spin_lock_irqsave(&host->lock, flags);
2037 if (pwr_state)
2038 msm_host->curr_pwr_state = pwr_state;
2039 if (io_level)
2040 msm_host->curr_io_level = io_level;
2041 complete(&msm_host->pwr_irq_completion);
2042 spin_unlock_irqrestore(&host->lock, flags);
2043
Asutosh Das33a4ff52012-12-18 16:14:02 +05302044 return IRQ_HANDLED;
2045}
2046
2047/* This function returns the max. current supported by VDD rail in mA */
2048static unsigned int sdhci_msm_get_vreg_vdd_max_current(struct sdhci_msm_host
2049 *host)
2050{
2051 struct sdhci_msm_slot_reg_data *curr_slot = host->pdata->vreg_data;
2052 if (!curr_slot)
2053 return 0;
2054 if (curr_slot->vdd_data)
2055 return curr_slot->vdd_data->hpm_uA / 1000;
2056 else
2057 return 0;
2058}
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302059
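/* sysfs 'polling' attribute: show/control MMC_CAP_NEEDS_POLL based card detect polling */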
2060static ssize_t
2061show_polling(struct device *dev, struct device_attribute *attr, char *buf)
2062{
2063 struct sdhci_host *host = dev_get_drvdata(dev);
2064 int poll;
2065 unsigned long flags;
2066
2067 spin_lock_irqsave(&host->lock, flags);
2068 poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
2069 spin_unlock_irqrestore(&host->lock, flags);
2070
2071 return snprintf(buf, PAGE_SIZE, "%d\n", poll);
2072}
2073
2074static ssize_t
2075store_polling(struct device *dev, struct device_attribute *attr,
2076 const char *buf, size_t count)
2077{
2078 struct sdhci_host *host = dev_get_drvdata(dev);
2079 int value;
2080 unsigned long flags;
2081
2082 if (!kstrtou32(buf, 0, &value)) {
2083 spin_lock_irqsave(&host->lock, flags);
2084 if (value) {
2085 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
2086 mmc_detect_change(host->mmc, 0);
2087 } else {
2088 host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2089 }
2090 spin_unlock_irqrestore(&host->lock, flags);
2091 }
2092 return count;
2093}
2094
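/* sysfs 'max_bus_bw' attribute: show/control the is_max_bw_needed flag used by bus bandwidth voting */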
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302095static ssize_t
2096show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2097 char *buf)
2098{
2099 struct sdhci_host *host = dev_get_drvdata(dev);
2100 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2101 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2102
2103 return snprintf(buf, PAGE_SIZE, "%u\n",
2104 msm_host->msm_bus_vote.is_max_bw_needed);
2105}
2106
2107static ssize_t
2108store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
2109 const char *buf, size_t count)
2110{
2111 struct sdhci_host *host = dev_get_drvdata(dev);
2112 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2113 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2114 uint32_t value;
2115 unsigned long flags;
2116
2117 if (!kstrtou32(buf, 0, &value)) {
2118 spin_lock_irqsave(&host->lock, flags);
2119 msm_host->msm_bus_vote.is_max_bw_needed = !!value;
2120 spin_unlock_irqrestore(&host->lock, flags);
2121 }
2122 return count;
2123}
Asutosh Das33a4ff52012-12-18 16:14:02 +05302124
Sahitya Tummala179e7382013-03-20 19:24:01 +05302125static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
Asutosh Das33a4ff52012-12-18 16:14:02 +05302126{
2127 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2128 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala179e7382013-03-20 19:24:01 +05302129 unsigned long flags;
2130 bool done = false;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302131
Sahitya Tummala179e7382013-03-20 19:24:01 +05302132 spin_lock_irqsave(&host->lock, flags);
2133 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
2134 mmc_hostname(host->mmc), __func__, req_type,
2135 msm_host->curr_pwr_state, msm_host->curr_io_level);
2136 if ((req_type & msm_host->curr_pwr_state) ||
2137 (req_type & msm_host->curr_io_level))
2138 done = true;
2139 spin_unlock_irqrestore(&host->lock, flags);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302140
Sahitya Tummala179e7382013-03-20 19:24:01 +05302141 /*
2142	 * This is needed here to handle a case where IRQ gets
2143 * triggered even before this function is called so that
2144 * x->done counter of completion gets reset. Otherwise,
2145 * next call to wait_for_completion returns immediately
2146 * without actually waiting for the IRQ to be handled.
2147 */
2148 if (done)
2149 init_completion(&msm_host->pwr_irq_completion);
2150 else
2151 wait_for_completion(&msm_host->pwr_irq_completion);
2152
2153 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
2154 __func__, req_type);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302155}
2156
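/* Enable or disable the CDR (clock data recovery) circuit via CORE_DLL_CONFIG */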
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002157static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
2158{
2159 if (enable)
2160 writel_relaxed((readl_relaxed(host->ioaddr +
2161 CORE_DLL_CONFIG) | CORE_CDR_EN),
2162 host->ioaddr + CORE_DLL_CONFIG);
2163 else
2164 writel_relaxed((readl_relaxed(host->ioaddr +
2165 CORE_DLL_CONFIG) & ~CORE_CDR_EN),
2166 host->ioaddr + CORE_DLL_CONFIG);
2167}
2168
Asutosh Das3781bd82013-01-10 21:11:04 +05302169static unsigned int sdhci_msm_max_segs(void)
2170{
2171 return SDHCI_MSM_MAX_SEGMENTS;
2172}
2173
Sahitya Tummala00240122013-02-28 19:50:51 +05302174static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302175{
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302176 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2177 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302178
Sahitya Tummala00240122013-02-28 19:50:51 +05302179 return msm_host->pdata->sup_clk_table[0];
2180}
2181
2182static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
2183{
2184 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2185 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2186 int max_clk_index = msm_host->pdata->sup_clk_cnt;
2187
2188 return msm_host->pdata->sup_clk_table[max_clk_index - 1];
2189}
2190
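/*
 * Return the highest supported clock rate that does not exceed req_clk,
 * or the minimum supported rate if req_clk is below it.
 */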
2191static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
2192 u32 req_clk)
2193{
2194 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2195 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2196 unsigned int sel_clk = -1;
2197 unsigned char cnt;
2198
2199 if (req_clk < sdhci_msm_get_min_clock(host)) {
2200 sel_clk = sdhci_msm_get_min_clock(host);
2201 return sel_clk;
2202 }
2203
2204 for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
2205 if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
2206 break;
2207 } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
2208 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2209 break;
2210 } else {
2211 sel_clk = msm_host->pdata->sup_clk_table[cnt];
2212 }
2213 }
2214 return sel_clk;
2215}
2216
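/* Cast the bus vote and enable the iface (pclk) and core (clk) clocks needed by the controller */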
Asutosh Das6c0804b2013-11-08 12:33:47 +05302217static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
2218{
2219 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2220 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2221 int rc = 0;
2222
2223 if (atomic_read(&msm_host->controller_clock))
2224 return 0;
2225
2226 sdhci_msm_bus_voting(host, 1);
2227
2228 if (!IS_ERR(msm_host->pclk)) {
2229 rc = clk_prepare_enable(msm_host->pclk);
2230 if (rc) {
2231 pr_err("%s: %s: failed to enable the pclk with error %d\n",
2232 mmc_hostname(host->mmc), __func__, rc);
2233 goto remove_vote;
2234 }
2235 }
2236
2237 rc = clk_prepare_enable(msm_host->clk);
2238 if (rc) {
2239 pr_err("%s: %s: failed to enable the host-clk with error %d\n",
2240 mmc_hostname(host->mmc), __func__, rc);
2241 goto disable_pclk;
2242 }
2243
2244 atomic_set(&msm_host->controller_clock, 1);
2245 pr_debug("%s: %s: enabled controller clock\n",
2246 mmc_hostname(host->mmc), __func__);
2247 goto out;
2248
2249disable_pclk:
2250 if (!IS_ERR(msm_host->pclk))
2251 clk_disable_unprepare(msm_host->pclk);
2252remove_vote:
2253 if (msm_host->msm_bus_vote.client_handle)
2254 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2255out:
2256 return rc;
2257}
2258
2259
2260
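/* Enable or disable all host clocks (controller, bus, ff, sleep) and update the bus vote accordingly */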
Sahitya Tummala00240122013-02-28 19:50:51 +05302261static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
2262{
2263 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2264 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2265 int rc = 0;
2266
2267 if (enable && !atomic_read(&msm_host->clks_on)) {
2268 pr_debug("%s: request to enable clocks\n",
2269 mmc_hostname(host->mmc));
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302270
Asutosh Das6c0804b2013-11-08 12:33:47 +05302271 /*
2272 * The bus-width or the clock rate might have changed
2273		 * after controller clocks are enabled; update the bus vote
2274		 * in that case.
2275 */
2276 if (atomic_read(&msm_host->controller_clock))
2277 sdhci_msm_bus_voting(host, 1);
2278
2279 rc = sdhci_msm_enable_controller_clock(host);
2280 if (rc)
2281 goto remove_vote;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302282
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302283 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2284 rc = clk_prepare_enable(msm_host->bus_clk);
2285 if (rc) {
2286 pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
2287 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das6c0804b2013-11-08 12:33:47 +05302288 goto disable_controller_clk;
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302289 }
2290 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002291 if (!IS_ERR(msm_host->ff_clk)) {
2292 rc = clk_prepare_enable(msm_host->ff_clk);
2293 if (rc) {
2294 pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
2295 mmc_hostname(host->mmc), __func__, rc);
Asutosh Das6c0804b2013-11-08 12:33:47 +05302296 goto disable_bus_clk;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002297 }
2298 }
2299 if (!IS_ERR(msm_host->sleep_clk)) {
2300 rc = clk_prepare_enable(msm_host->sleep_clk);
2301 if (rc) {
2302 pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
2303 mmc_hostname(host->mmc), __func__, rc);
2304 goto disable_ff_clk;
2305 }
2306 }
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302307 mb();
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302308
Sahitya Tummala00240122013-02-28 19:50:51 +05302309 } else if (!enable && atomic_read(&msm_host->clks_on)) {
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302310 pr_debug("%s: request to disable clocks\n",
2311 mmc_hostname(host->mmc));
2312 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2313 mb();
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002314 if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
2315 clk_disable_unprepare(msm_host->sleep_clk);
2316 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2317 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302318 clk_disable_unprepare(msm_host->clk);
2319 if (!IS_ERR(msm_host->pclk))
2320 clk_disable_unprepare(msm_host->pclk);
2321 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2322 clk_disable_unprepare(msm_host->bus_clk);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302323
Asutosh Das6c0804b2013-11-08 12:33:47 +05302324 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302325 sdhci_msm_bus_voting(host, 0);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302326 }
Sahitya Tummala00240122013-02-28 19:50:51 +05302327 atomic_set(&msm_host->clks_on, enable);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302328 goto out;
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002329disable_ff_clk:
2330 if (!IS_ERR_OR_NULL(msm_host->ff_clk))
2331 clk_disable_unprepare(msm_host->ff_clk);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302332disable_bus_clk:
2333 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2334 clk_disable_unprepare(msm_host->bus_clk);
Asutosh Das6c0804b2013-11-08 12:33:47 +05302335disable_controller_clk:
2336 if (!IS_ERR_OR_NULL(msm_host->clk))
2337 clk_disable_unprepare(msm_host->clk);
2338 if (!IS_ERR_OR_NULL(msm_host->pclk))
2339 clk_disable_unprepare(msm_host->pclk);
2340 atomic_set(&msm_host->controller_clock, 0);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302341remove_vote:
2342 if (msm_host->msm_bus_vote.client_handle)
2343 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302344out:
Sahitya Tummala00240122013-02-28 19:50:51 +05302345 return rc;
2346}
2347
2348static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
2349{
2350 int rc;
2351 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2352 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2353 struct mmc_ios curr_ios = host->mmc->ios;
2354 u32 sup_clock, ddr_clock;
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302355 bool curr_pwrsave;
Sahitya Tummala00240122013-02-28 19:50:51 +05302356
2357 if (!clock) {
Sujit Reddy Thumma0e9ec032014-01-10 10:58:54 +05302358 /*
2359 * disable pwrsave to ensure clock is not auto-gated until
2360 * the rate is >400KHz (initialization complete).
2361 */
2362 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2363 ~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
Sahitya Tummala00240122013-02-28 19:50:51 +05302364 sdhci_msm_prepare_clocks(host, false);
2365 host->clock = clock;
2366 return;
2367 }
2368
2369 rc = sdhci_msm_prepare_clocks(host, true);
2370 if (rc)
2371 return;
2372
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302373 curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
2374 CORE_CLK_PWRSAVE);
Sahitya Tummala2c4bd642013-08-29 16:21:08 +05302375 if ((clock > 400000) &&
Sahitya Tummalac69e2a22013-06-24 09:55:33 +05302376 !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
2377 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2378 | CORE_CLK_PWRSAVE,
2379 host->ioaddr + CORE_VENDOR_SPEC);
2380 /*
2381	 * Disable pwrsave for a newly added card if it doesn't allow clock
2382 * gating.
2383 */
2384 else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
2385 writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2386 & ~CORE_CLK_PWRSAVE,
2387 host->ioaddr + CORE_VENDOR_SPEC);
2388
Sahitya Tummala00240122013-02-28 19:50:51 +05302389 sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002390 if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
2391 (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
Sahitya Tummala00240122013-02-28 19:50:51 +05302392 /*
2393		 * The SDHC requires the internal clock frequency to be double the
2394		 * actual clock that will be set for DDR mode. The controller
2395		 * uses the faster clock (100/400MHz) for some of its parts and
2396		 * sends the actual required clock (50/200MHz) to the card.
Sahitya Tummala00240122013-02-28 19:50:51 +05302397 */
2398 ddr_clock = clock * 2;
2399 sup_clock = sdhci_msm_get_sup_clk_rate(host,
2400 ddr_clock);
2401 }
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002402
2403 /*
2404 * In general all timing modes are controlled via UHS mode select in
2405	 * Host Control2 register. eMMC specific HS200/HS400 modes don't have
2406	 * their own encodings defined here, hence we use these values.
2407 *
2408 * HS200 - SDR104 (Since they both are equivalent in functionality)
2409 * HS400 - This involves multiple configurations
2410 * Initially SDR104 - when tuning is required as HS200
2411 * Then when switching to DDR @ 400MHz (HS400) we use
2412 * the vendor specific HC_SELECT_IN to control the mode.
2413 *
2414 * In addition to controlling the modes we also need to select the
2415 * correct input clock for DLL depending on the mode.
2416 *
2417 * HS400 - divided clock (free running MCLK/2)
2418 * All other modes - default (free running MCLK)
2419 */
2420 if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
2421 /* Select the divided clock (free running MCLK/2) */
2422 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2423 & ~CORE_HC_MCLK_SEL_MASK)
2424 | CORE_HC_MCLK_SEL_HS400),
2425 host->ioaddr + CORE_VENDOR_SPEC);
2426 /*
2427 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
2428 * register
2429 */
2430 if (msm_host->tuning_done && !msm_host->calibration_done) {
2431 /*
2432 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
2433 * field in VENDOR_SPEC_FUNC
2434 */
2435 writel_relaxed((readl_relaxed(host->ioaddr + \
2436 CORE_VENDOR_SPEC)
2437 | CORE_HC_SELECT_IN_HS400
2438 | CORE_HC_SELECT_IN_EN),
2439 host->ioaddr + CORE_VENDOR_SPEC);
2440 }
2441 } else {
2442 /* Select the default clock (free running MCLK) */
2443 writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2444 & ~CORE_HC_MCLK_SEL_MASK)
2445 | CORE_HC_MCLK_SEL_DFLT),
2446 host->ioaddr + CORE_VENDOR_SPEC);
2447
2448 /*
2449 * Disable HC_SELECT_IN to be able to use the UHS mode select
2450 * configuration from Host Control2 register for all other
2451 * modes.
2452 *
2453 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
2454 * in VENDOR_SPEC_FUNC
2455 */
2456 writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
2457 & ~CORE_HC_SELECT_IN_EN
2458 & ~CORE_HC_SELECT_IN_MASK),
2459 host->ioaddr + CORE_VENDOR_SPEC);
2460 }
2461 mb();
2462
Sahitya Tummala00240122013-02-28 19:50:51 +05302463 if (sup_clock != msm_host->clk_rate) {
2464 pr_debug("%s: %s: setting clk rate to %u\n",
2465 mmc_hostname(host->mmc), __func__, sup_clock);
2466 rc = clk_set_rate(msm_host->clk, sup_clock);
2467 if (rc) {
2468 pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
2469 mmc_hostname(host->mmc), __func__,
2470 sup_clock, rc);
2471 return;
2472 }
2473 msm_host->clk_rate = sup_clock;
2474 host->clock = clock;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302475 /*
2476 * Update the bus vote in case of frequency change due to
2477 * clock scaling.
2478 */
2479 sdhci_msm_bus_voting(host, 1);
Sahitya Tummala00240122013-02-28 19:50:51 +05302480 }
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302481}
2482
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302483static int sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
2484 unsigned int uhs)
2485{
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002486 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2487 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302488 u16 ctrl_2;
2489
2490 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2491 /* Select Bus Speed Mode for host */
2492 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
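	/* HS200/HS400 have no UHS encoding of their own; SDR104 is used (see sdhci_msm_set_clock) */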
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002493 if (uhs == MMC_TIMING_MMC_HS400)
2494 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2495 else if (uhs == MMC_TIMING_MMC_HS200)
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302496 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2497 else if (uhs == MMC_TIMING_UHS_SDR12)
2498 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2499 else if (uhs == MMC_TIMING_UHS_SDR25)
2500 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2501 else if (uhs == MMC_TIMING_UHS_SDR50)
2502 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2503 else if (uhs == MMC_TIMING_UHS_SDR104)
2504 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2505 else if (uhs == MMC_TIMING_UHS_DDR50)
2506 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
Sahitya Tummala00240122013-02-28 19:50:51 +05302507 /*
2508	 * When clock frequency is less than 100MHz, the feedback clock must be
2509 * provided and DLL must not be used so that tuning can be skipped. To
2510 * provide feedback clock, the mode selection can be any value less
2511 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
2512 */
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002513 if (host->clock <= CORE_FREQ_100MHZ) {
2514 if ((uhs == MMC_TIMING_MMC_HS400) ||
2515 (uhs == MMC_TIMING_MMC_HS200) ||
2516 (uhs == MMC_TIMING_UHS_SDR104))
2517 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
Sahitya Tummala00240122013-02-28 19:50:51 +05302518
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002519 /*
2520 * Make sure DLL is disabled when not required
2521 *
2522 * Write 1 to DLL_RST bit of DLL_CONFIG register
2523 */
2524 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2525 | CORE_DLL_RST),
2526 host->ioaddr + CORE_DLL_CONFIG);
2527
2528 /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
2529 writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
2530 | CORE_DLL_PDN),
2531 host->ioaddr + CORE_DLL_CONFIG);
2532 mb();
2533
2534 /*
2535 * The DLL needs to be restored and CDCLP533 recalibrated
2536 * when the clock frequency is set back to 400MHz.
2537 */
2538 msm_host->calibration_done = false;
2539 }
2540
2541 pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
2542 mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302543 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2544
2545 return 0;
2546}
2547
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002548/*
2549 * sdhci_msm_disable_data_xfer - disable ongoing AHB bus data transfer
2550 *
2551 * Write 0 to bit 0 in MCI_DATA_CTL (offset 0x2C) - clearing TxActive bit by
2552 * access to legacy registers. It will stop the current burst and prevent start of
2553 * the next one.
2554 *
2555 * Polling with a CORE_AHB_DATA_DELAY_US timeout, by reading bits 13:12 until they are 0
2556 * in CORE_SDCC_DEBUG_REG (offset 0x124), validates that the AHB burst
2557 * completed and a new one didn't start.
2558 *
2559 * Waiting for 4us lets the AHB finish the descriptor fetch.
2560 */
2561static void sdhci_msm_disable_data_xfer(struct sdhci_host *host)
2562{
2563 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2564 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2565 u32 value;
2566 int ret;
Venkat Gopalakrishnan0a179c82013-06-26 17:56:11 -07002567 u32 version;
2568
2569 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
2570 /* Core version 3.1.0 doesn't need this workaround */
2571 if (version == CORE_VERSION_310)
2572 return;
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002573
2574 value = readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CTRL);
2575 value &= ~(u32)CORE_MCI_DPSM_ENABLE;
2576 writel_relaxed(value, msm_host->core_mem + CORE_MCI_DATA_CTRL);
2577
2578 /* Enable the test bus for device slot */
2579 writel_relaxed(CORE_TESTBUS_ENA | CORE_TESTBUS_SEL2,
2580 msm_host->core_mem + CORE_TESTBUS_CONFIG);
2581
2582 ret = readl_poll_timeout_noirq(msm_host->core_mem
2583 + CORE_SDCC_DEBUG_REG, value,
2584 !(value & CORE_DEBUG_REG_AHB_HTRANS),
2585 CORE_AHB_DATA_DELAY_US, 1);
2586 if (ret) {
2587 pr_err("%s: %s: can't stop ongoing AHB bus access by ADMA\n",
2588 mmc_hostname(host->mmc), __func__);
2589 BUG();
2590 }
2591 /* Disable the test bus for device slot */
2592 value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
2593 value &= ~CORE_TESTBUS_ENA;
2594 writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
2595
2596 udelay(CORE_AHB_DESC_DELAY_US);
2597}
2598
Asutosh Das33a4ff52012-12-18 16:14:02 +05302599static struct sdhci_ops sdhci_msm_ops = {
Sahitya Tummala5e4f9642013-03-21 11:13:25 +05302600 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302601 .check_power_status = sdhci_msm_check_power_status,
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002602 .execute_tuning = sdhci_msm_execute_tuning,
2603 .toggle_cdr = sdhci_msm_toggle_cdr,
Asutosh Das3781bd82013-01-10 21:11:04 +05302604 .get_max_segments = sdhci_msm_max_segs,
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302605 .set_clock = sdhci_msm_set_clock,
Sahitya Tummala00240122013-02-28 19:50:51 +05302606 .get_min_clock = sdhci_msm_get_min_clock,
2607 .get_max_clock = sdhci_msm_get_max_clock,
Konstantin Dorfmancceca8d2013-04-24 15:51:31 +03002608 .disable_data_xfer = sdhci_msm_disable_data_xfer,
Asutosh Das6c0804b2013-11-08 12:33:47 +05302609 .enable_controller_clock = sdhci_msm_enable_controller_clock,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302610};
2611
2612static int __devinit sdhci_msm_probe(struct platform_device *pdev)
2613{
2614 struct sdhci_host *host;
2615 struct sdhci_pltfm_host *pltfm_host;
2616 struct sdhci_msm_host *msm_host;
2617 struct resource *core_memres = NULL;
Asutosh Dasbbc84782013-02-11 15:31:35 +05302618 int ret = 0, dead = 0;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302619 u32 vdd_max_current;
Stephen Boyd3edbd8f2013-04-24 14:19:46 -07002620 u16 host_version;
Subhash Jadavanic08d2062013-05-14 17:46:43 +05302621 u32 pwr, irq_status, irq_ctl;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302622
2623 pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
2624 msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
2625 GFP_KERNEL);
2626 if (!msm_host) {
2627 ret = -ENOMEM;
2628 goto out;
2629 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302630
2631 msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
2632 host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata);
2633 if (IS_ERR(host)) {
2634 ret = PTR_ERR(host);
2635 goto out;
2636 }
2637
2638 pltfm_host = sdhci_priv(host);
2639 pltfm_host->priv = msm_host;
2640 msm_host->mmc = host->mmc;
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302641 msm_host->pdev = pdev;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302642
2643 /* Extract platform data */
2644 if (pdev->dev.of_node) {
Venkat Gopalakrishnanc61ab7e2013-03-11 12:17:57 -07002645 ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
2646 if (ret < 0) {
2647 dev_err(&pdev->dev, "Failed to get slot index %d\n",
2648 ret);
2649 goto pltfm_free;
2650 }
2651 if (disable_slots & (1 << (ret - 1))) {
2652 dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
2653 ret);
2654 ret = -ENODEV;
2655 goto pltfm_free;
2656 }
2657
Asutosh Das33a4ff52012-12-18 16:14:02 +05302658 msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev);
2659		if (!msm_host->pdata) {
2660			dev_err(&pdev->dev, "DT parsing error\n");
			ret = -EINVAL;
2661			goto pltfm_free;
2662		}
2663	} else {
2664		dev_err(&pdev->dev, "No device tree node\n");
		ret = -ENODEV;
2665		goto pltfm_free;
2666 }
2667
2668 /* Setup Clocks */
2669
2670 /* Setup SDCC bus voter clock. */
2671 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
2672 if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
2673 /* Vote for max. clk rate for max. performance */
2674 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
2675 if (ret)
2676 goto pltfm_free;
2677 ret = clk_prepare_enable(msm_host->bus_clk);
2678 if (ret)
2679 goto pltfm_free;
2680 }
2681
2682 /* Setup main peripheral bus clock */
2683 msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
2684 if (!IS_ERR(msm_host->pclk)) {
2685 ret = clk_prepare_enable(msm_host->pclk);
2686 if (ret)
2687 goto bus_clk_disable;
2688 }
Asutosh Das6c0804b2013-11-08 12:33:47 +05302689 atomic_set(&msm_host->controller_clock, 1);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302690
2691 /* Setup SDC MMC clock */
2692 msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
2693 if (IS_ERR(msm_host->clk)) {
2694 ret = PTR_ERR(msm_host->clk);
2695 goto pclk_disable;
2696 }
2697
Sahitya Tummala00240122013-02-28 19:50:51 +05302698 /* Set to the minimum supported clock frequency */
2699 ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
2700 if (ret) {
2701 dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
Sahitya Tummalac954ab02013-06-07 13:03:07 +05302702 goto pclk_disable;
Sahitya Tummala00240122013-02-28 19:50:51 +05302703 }
Sahitya Tummalac954ab02013-06-07 13:03:07 +05302704 ret = clk_prepare_enable(msm_host->clk);
2705 if (ret)
2706 goto pclk_disable;
2707
Sahitya Tummala00240122013-02-28 19:50:51 +05302708 msm_host->clk_rate = sdhci_msm_get_min_clock(host);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302709 atomic_set(&msm_host->clks_on, 1);
Sahitya Tummala00240122013-02-28 19:50:51 +05302710
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002711 /* Setup CDC calibration fixed feedback clock */
2712 msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
2713 if (!IS_ERR(msm_host->ff_clk)) {
2714 ret = clk_prepare_enable(msm_host->ff_clk);
2715 if (ret)
2716 goto clk_disable;
2717 }
2718
2719 /* Setup CDC calibration sleep clock */
2720 msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
2721 if (!IS_ERR(msm_host->sleep_clk)) {
2722 ret = clk_prepare_enable(msm_host->sleep_clk);
2723 if (ret)
2724 goto ff_clk_disable;
2725 }
2726
Venkat Gopalakrishnanb6cfa292013-06-12 11:16:37 -07002727 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
2728
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302729 ret = sdhci_msm_bus_register(msm_host, pdev);
2730 if (ret)
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002731 goto sleep_clk_disable;
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302732
2733 if (msm_host->msm_bus_vote.client_handle)
2734 INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
2735 sdhci_msm_bus_work);
2736 sdhci_msm_bus_voting(host, 1);
2737
Asutosh Das33a4ff52012-12-18 16:14:02 +05302738 /* Setup regulators */
2739 ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
2740 if (ret) {
2741 dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302742 goto bus_unregister;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302743 }
2744
2745 /* Reset the core and Enable SDHC mode */
2746 core_memres = platform_get_resource_byname(pdev,
2747 IORESOURCE_MEM, "core_mem");
2748 msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
2749 resource_size(core_memres));
2750
2751 if (!msm_host->core_mem) {
2752 dev_err(&pdev->dev, "Failed to remap registers\n");
2753 ret = -ENOMEM;
2754 goto vreg_deinit;
2755 }
2756
Stepan Moskovchenkoe0938982013-09-13 22:19:33 -07002757 /* Unset HC_MODE_EN bit in HC_MODE register */
2758 writel_relaxed(0, (msm_host->core_mem + CORE_HC_MODE));
2759
Asutosh Das33a4ff52012-12-18 16:14:02 +05302760 /* Set SW_RST bit in POWER register (Offset 0x0) */
Sahitya Tummalad5d76e72013-04-25 11:50:56 +05302761 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
2762 CORE_SW_RST, msm_host->core_mem + CORE_POWER);
2763 /*
2764	 * SW reset can take up to 10 HCLK + 15 MCLK cycles.
2765	 * Calculating based on min clk rates (hclk = 27MHz,
2766	 * mclk = 400KHz), it comes to ~40us. Let's poll for
2767 * max. 1ms for reset completion.
2768 */
2769 ret = readl_poll_timeout(msm_host->core_mem + CORE_POWER,
2770 pwr, !(pwr & CORE_SW_RST), 100, 10);
2771
2772 if (ret) {
2773 dev_err(&pdev->dev, "reset failed (%d)\n", ret);
2774 goto vreg_deinit;
2775 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05302776 /* Set HC_MODE_EN bit in HC_MODE register */
2777 writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
2778
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002779 /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
2780 writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
2781 FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
2782
Asutosh Das33a4ff52012-12-18 16:14:02 +05302783 /*
Subhash Jadavanic08d2062013-05-14 17:46:43 +05302784 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
2785 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
2786 * interrupt in GIC (by registering the interrupt handler), we need to
2787 * ensure that any pending power irq interrupt status is acknowledged
2788 * otherwise power irq interrupt handler would be fired prematurely.
2789 */
2790 irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
2791 writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
2792 irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
2793 if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
2794 irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
2795 if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
2796 irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
2797 writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
2798 /*
2799	 * Ensure that the above writes are propagated before interrupt enablement
2800 * in GIC.
2801 */
2802 mb();
2803
2804 /*
Asutosh Das33a4ff52012-12-18 16:14:02 +05302805 * Following are the deviations from SDHC spec v3.0 -
2806 * 1. Card detection is handled using separate GPIO.
2807 * 2. Bus power control is handled by interacting with PMIC.
2808 */
2809 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
2810 host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
Sahitya Tummala00240122013-02-28 19:50:51 +05302811 host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
2812 host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
Sahitya Tummalad6a74b02013-02-25 15:50:08 +05302813 host->quirks2 |= SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING;
Krishna Kondaa20d3362013-04-01 21:01:59 -07002814 host->quirks2 |= SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE;
Sahitya Tummalad2ae8832013-04-12 11:49:11 +05302815 host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
Sahitya Tummalae6886bd2013-04-12 12:11:20 +05302816 host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
Sahitya Tummala4d12d0b2013-04-12 11:59:25 +05302817 host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302818
Sahitya Tummalaf667cc12013-06-10 16:32:51 +05302819 if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
2820 host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
2821
Stephen Boyd3edbd8f2013-04-24 14:19:46 -07002822 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
Venkat Gopalakrishnane9beaa22012-09-17 16:00:15 -07002823 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
2824 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
2825 SDHCI_VENDOR_VER_SHIFT));
2826 if (((host_version & SDHCI_VENDOR_VER_MASK) >>
2827 SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
2828 /*
2829 * Add 40us delay in interrupt handler when
2830 * operating at initialization frequency(400KHz).
2831 */
2832 host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
2833 /*
2834 * Set Software Reset for DAT line in Software
2835 * Reset Register (Bit 2).
2836 */
2837 host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
2838 }
2839
2840 /* Setup PWRCTL irq */
Asutosh Dasbbc84782013-02-11 15:31:35 +05302841 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
2842 if (msm_host->pwr_irq < 0) {
Asutosh Das33a4ff52012-12-18 16:14:02 +05302843 dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
Asutosh Dasbbc84782013-02-11 15:31:35 +05302844 msm_host->pwr_irq);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302845 goto vreg_deinit;
2846 }
Asutosh Dasbbc84782013-02-11 15:31:35 +05302847 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
Asutosh Das33a4ff52012-12-18 16:14:02 +05302848 sdhci_msm_pwr_irq, IRQF_ONESHOT,
Venkat Gopalakrishnan8609a432012-09-11 16:13:31 -07002849 dev_name(&pdev->dev), host);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302850 if (ret) {
2851 dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
Asutosh Dasbbc84782013-02-11 15:31:35 +05302852 msm_host->pwr_irq, ret);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302853 goto vreg_deinit;
2854 }
2855
2856 /* Enable pwr irq interrupts */
2857 writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
2858
Sahitya Tummala04c3a462013-01-11 11:30:45 +05302859 /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
2860 msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
2861
Asutosh Das33a4ff52012-12-18 16:14:02 +05302862 /* Set host capabilities */
2863 msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
2864 msm_host->mmc->caps |= msm_host->pdata->caps;
2865
2866 vdd_max_current = sdhci_msm_get_vreg_vdd_max_current(msm_host);
2867 if (vdd_max_current >= 800)
2868 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2869 else if (vdd_max_current >= 600)
2870 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2871 else if (vdd_max_current >= 400)
2872 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2873 else
2874 msm_host->mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2875
2876 if (vdd_max_current > 150)
2877 msm_host->mmc->caps |= MMC_CAP_SET_XPC_180 |
2878 MMC_CAP_SET_XPC_300|
2879 MMC_CAP_SET_XPC_330;
2880
2881 msm_host->mmc->caps2 |= msm_host->pdata->caps2;
Asutosh Dasbbc84782013-02-11 15:31:35 +05302882 msm_host->mmc->caps2 |= MMC_CAP2_CORE_RUNTIME_PM;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302883 msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR;
2884 msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
2885 msm_host->mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC |
2886 MMC_CAP2_DETECT_ON_ERR);
2887 msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
2888 msm_host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302889 msm_host->mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
Sahitya Tummala00240122013-02-28 19:50:51 +05302890 msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
Konstantin Dorfmanfa436d52013-04-17 16:26:11 +03002891 msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
Subhash Jadavani61a52c92013-05-29 15:52:10 +05302892 msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
Asutosh Das41c35212013-09-19 11:14:27 +05302893 msm_host->mmc->caps2 |= MMC_CAP2_CORE_PM;
Asutosh Das4dc60412013-06-24 18:20:45 +05302894 msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302895
2896 if (msm_host->pdata->nonremovable)
2897 msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
2898
Sahitya Tummalab4e84042013-03-10 07:03:17 +05302899 host->cpu_dma_latency_us = msm_host->pdata->cpu_dma_latency_us;
2900
Sahitya Tummala179e7382013-03-20 19:24:01 +05302901 init_completion(&msm_host->pwr_irq_completion);
2902
Sahitya Tummala62448d92013-03-12 14:57:46 +05302903 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
2904 ret = mmc_cd_gpio_request(msm_host->mmc,
2905 msm_host->pdata->status_gpio);
2906 if (ret) {
2907 dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
2908 __func__, ret);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302909 goto vreg_deinit;
Sahitya Tummala62448d92013-03-12 14:57:46 +05302910 }
2911 }
2912
Sahitya Tummala2fa7eb12013-03-20 19:34:59 +05302913 if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
2914 host->dma_mask = DMA_BIT_MASK(32);
2915 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
2916 } else {
2917 dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
2918 }
2919
Asutosh Das33a4ff52012-12-18 16:14:02 +05302920 ret = sdhci_add_host(host);
2921 if (ret) {
2922 dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302923 goto free_cd_gpio;
Asutosh Das33a4ff52012-12-18 16:14:02 +05302924 }
2925
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05302926 msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
2927 msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
2928 sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
2929 msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
2930 msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
2931 ret = device_create_file(&pdev->dev,
2932 &msm_host->msm_bus_vote.max_bus_bw);
2933 if (ret)
2934 goto remove_host;
2935
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302936 if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
2937 msm_host->polling.show = show_polling;
2938 msm_host->polling.store = store_polling;
2939 sysfs_attr_init(&msm_host->polling.attr);
2940 msm_host->polling.attr.name = "polling";
2941 msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
2942 ret = device_create_file(&pdev->dev, &msm_host->polling);
2943 if (ret)
2944 goto remove_max_bus_bw_file;
2945 }
Asutosh Dasbbc84782013-02-11 15:31:35 +05302946 ret = pm_runtime_set_active(&pdev->dev);
2947 if (ret)
2948 pr_err("%s: %s: pm_runtime_set_active failed: err: %d\n",
2949 mmc_hostname(host->mmc), __func__, ret);
Asutosh Das41c35212013-09-19 11:14:27 +05302950 else if (mmc_use_core_runtime_pm(host->mmc))
Asutosh Dasbbc84782013-02-11 15:31:35 +05302951 pm_runtime_enable(&pdev->dev);
2952
Asutosh Das33a4ff52012-12-18 16:14:02 +05302953 /* Successful initialization */
2954 goto out;
2955
Sahitya Tummala3b292c32013-06-20 14:00:18 +05302956remove_max_bus_bw_file:
2957 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302958remove_host:
2959 dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
2960 sdhci_remove_host(host, dead);
Sahitya Tummala62448d92013-03-12 14:57:46 +05302961free_cd_gpio:
2962 if (gpio_is_valid(msm_host->pdata->status_gpio))
2963 mmc_cd_gpio_free(msm_host->mmc);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302964vreg_deinit:
2965 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummalac45ae732013-05-23 15:59:22 +05302966bus_unregister:
2967 if (msm_host->msm_bus_vote.client_handle)
2968 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
2969 sdhci_msm_bus_unregister(msm_host);
Venkat Gopalakrishnan2d397062013-06-23 17:36:46 -07002970sleep_clk_disable:
2971 if (!IS_ERR(msm_host->sleep_clk))
2972 clk_disable_unprepare(msm_host->sleep_clk);
2973ff_clk_disable:
2974 if (!IS_ERR(msm_host->ff_clk))
2975 clk_disable_unprepare(msm_host->ff_clk);
Asutosh Das33a4ff52012-12-18 16:14:02 +05302976clk_disable:
2977 if (!IS_ERR(msm_host->clk))
2978 clk_disable_unprepare(msm_host->clk);
2979pclk_disable:
2980 if (!IS_ERR(msm_host->pclk))
2981 clk_disable_unprepare(msm_host->pclk);
2982bus_clk_disable:
2983 if (!IS_ERR_OR_NULL(msm_host->bus_clk))
2984 clk_disable_unprepare(msm_host->bus_clk);
2985pltfm_free:
2986 sdhci_pltfm_free(pdev);
2987out:
2988 pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
2989 return ret;
2990}
2991
2992static int __devexit sdhci_msm_remove(struct platform_device *pdev)
2993{
2994 struct sdhci_host *host = platform_get_drvdata(pdev);
2995 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2996 struct sdhci_msm_host *msm_host = pltfm_host->priv;
2997 struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
2998 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
2999 0xffffffff);
3000
3001 pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
Sahitya Tummala3b292c32013-06-20 14:00:18 +05303002 if (!gpio_is_valid(msm_host->pdata->status_gpio))
3003 device_remove_file(&pdev->dev, &msm_host->polling);
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05303004 device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
Asutosh Das33a4ff52012-12-18 16:14:02 +05303005 sdhci_remove_host(host, dead);
Asutosh Dasbbc84782013-02-11 15:31:35 +05303006 pm_runtime_disable(&pdev->dev);
Asutosh Das33a4ff52012-12-18 16:14:02 +05303007 sdhci_pltfm_free(pdev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303008
3009 if (gpio_is_valid(msm_host->pdata->status_gpio))
3010 mmc_cd_gpio_free(msm_host->mmc);
3011
Asutosh Das33a4ff52012-12-18 16:14:02 +05303012 sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
Sahitya Tummala04c3a462013-01-11 11:30:45 +05303013
Asutosh Das33a4ff52012-12-18 16:14:02 +05303014 if (pdata->pin_data)
Asutosh Das390519d2012-12-21 12:21:42 +05303015 sdhci_msm_setup_pins(pdata, false);
Sahitya Tummala9f5cbb82013-03-10 14:12:52 +05303016
3017 if (msm_host->msm_bus_vote.client_handle) {
3018 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3019 sdhci_msm_bus_unregister(msm_host);
3020 }
Asutosh Das33a4ff52012-12-18 16:14:02 +05303021 return 0;
3022}
3023
Asutosh Dasbbc84782013-02-11 15:31:35 +05303024static int sdhci_msm_runtime_suspend(struct device *dev)
3025{
3026 struct sdhci_host *host = dev_get_drvdata(dev);
3027 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3028 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3029
3030 disable_irq(host->irq);
3031 disable_irq(msm_host->pwr_irq);
3032
Sahitya Tummalac45ae732013-05-23 15:59:22 +05303033 /*
3034 * Remove the vote immediately only if clocks are off in which
3035 * case we might have queued work to remove vote but it may not
3036 * be completed before runtime suspend or system suspend.
3037 */
3038 if (!atomic_read(&msm_host->clks_on)) {
3039 if (msm_host->msm_bus_vote.client_handle)
3040 sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
3041 }
3042
Asutosh Dasbbc84782013-02-11 15:31:35 +05303043 return 0;
3044}
3045
3046static int sdhci_msm_runtime_resume(struct device *dev)
3047{
3048 struct sdhci_host *host = dev_get_drvdata(dev);
3049 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3050 struct sdhci_msm_host *msm_host = pltfm_host->priv;
3051
3052 enable_irq(msm_host->pwr_irq);
3053 enable_irq(host->irq);
3054
3055 return 0;
3056}
3057
3058#ifdef CONFIG_PM_SLEEP
3059
3060static int sdhci_msm_suspend(struct device *dev)
3061{
3062 struct sdhci_host *host = dev_get_drvdata(dev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303063 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3064 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Dasbbc84782013-02-11 15:31:35 +05303065 int ret = 0;
3066
Sahitya Tummala62448d92013-03-12 14:57:46 +05303067 if (gpio_is_valid(msm_host->pdata->status_gpio))
3068 mmc_cd_gpio_free(msm_host->mmc);
3069
Asutosh Dasbbc84782013-02-11 15:31:35 +05303070 if (pm_runtime_suspended(dev)) {
3071 pr_debug("%s: %s: already runtime suspended\n",
3072 mmc_hostname(host->mmc), __func__);
3073 goto out;
3074 }
3075
3076 return sdhci_msm_runtime_suspend(dev);
3077out:
3078 return ret;
3079}
3080
3081static int sdhci_msm_resume(struct device *dev)
3082{
3083 struct sdhci_host *host = dev_get_drvdata(dev);
Sahitya Tummala62448d92013-03-12 14:57:46 +05303084 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3085 struct sdhci_msm_host *msm_host = pltfm_host->priv;
Asutosh Dasbbc84782013-02-11 15:31:35 +05303086 int ret = 0;
3087
Sahitya Tummala62448d92013-03-12 14:57:46 +05303088 if (gpio_is_valid(msm_host->pdata->status_gpio)) {
3089 ret = mmc_cd_gpio_request(msm_host->mmc,
3090 msm_host->pdata->status_gpio);
3091 if (ret)
3092 pr_err("%s: %s: Failed to request card detection IRQ %d\n",
3093 mmc_hostname(host->mmc), __func__, ret);
3094 }
3095
Asutosh Dasbbc84782013-02-11 15:31:35 +05303096 if (pm_runtime_suspended(dev)) {
3097 pr_debug("%s: %s: runtime suspended, defer system resume\n",
3098 mmc_hostname(host->mmc), __func__);
3099 goto out;
3100 }
3101
3102 return sdhci_msm_runtime_resume(dev);
3103out:
3104 return ret;
3105}
3106#endif
3107
3108#ifdef CONFIG_PM
3109static const struct dev_pm_ops sdhci_msm_pmops = {
3110 SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
3111 SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
3112 NULL)
3113};
3114
3115#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
3116
3117#else
3118#define SDHCI_MSM_PMOPS NULL
3119#endif
Asutosh Das33a4ff52012-12-18 16:14:02 +05303120static const struct of_device_id sdhci_msm_dt_match[] = {
3121 {.compatible = "qcom,sdhci-msm"},
Sujit Reddy Thummae5594822013-11-28 08:51:19 +05303122 { /* sentinel */ }
Asutosh Das33a4ff52012-12-18 16:14:02 +05303123};
3124MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
3125
3126static struct platform_driver sdhci_msm_driver = {
3127 .probe = sdhci_msm_probe,
3128 .remove = __devexit_p(sdhci_msm_remove),
3129 .driver = {
3130 .name = "sdhci_msm",
3131 .owner = THIS_MODULE,
3132 .of_match_table = sdhci_msm_dt_match,
Asutosh Dasbbc84782013-02-11 15:31:35 +05303133 .pm = SDHCI_MSM_PMOPS,
Asutosh Das33a4ff52012-12-18 16:14:02 +05303134 },
3135};
3136
3137module_platform_driver(sdhci_msm_driver);
3138
3139MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
3140MODULE_LICENSE("GPL v2");