/*
 * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>

#ifdef CONFIG_QCOM_BUS_SCALING
#include <linux/msm-bus.h>
#endif

#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
#include <linux/clk/qcom.h>

#define MAX_PROP_SIZE		32
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
			       char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, hba->mmio_base + offset, len * 4, false);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->tx_l1_sync_clk)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->rx_l1_sync_clk)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		/* The tx lane1 clk could be muxed, hence keep this optional */
		if (host->tx_l1_sync_clk)
			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				host->tx_l1_sync_clk);
	}
	host->is_lane_clks_enabled = true;
	goto out;

disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err) {
			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
					__func__, err);
			goto out;
		}

		/* The tx lane1 clk could be muxed, hence keep this optional */
		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
				&host->tx_l1_sync_clk);
	}
out:
	return err;
}

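/*
 * Poll the M-PHY TX_FSM_STATE attribute via DME until it reports HIBERN8 or
 * the HBRN8_POLL_TOUT_MS timeout expires. Returns 0 when hibern8 is reached,
 * a negative error code if the DME access fails, or the unexpected FSM state
 * value otherwise.
 */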
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have been scheduled out for a long time during polling,
	 * so check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 *
 * UFS host controller v3.0.0 onwards has an internal clock gating mechanism
 * in Qunipro as well; enable it to save additional power.
 */
static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/* Enable UTP internal clock gating */
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();

	/* Enable Qunipro internal clock gating if supported */
	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
				DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
				PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL);
out:
	return err;
}

static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;

	/*
	 * Configure the behavior of ufs clocks core and peripheral
	 * memory state when they are turned off.
	 * This configuration is required to allow retaining
	 * ICE crypto configuration (including keys) when
	 * core_clk_ice is turned off, and powering down
	 * non-ICE RAMs of host controller.
	 */
	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_ice"))
			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
	}
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_force_mem_config(hba);
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		if (!err && host->ice.pdev) {
			err = ufs_qcom_ice_init(host);
			if (err) {
				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
					__func__, err);
				err = -EINVAL;
			}
		}

		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * __ufs_qcom_cfg_timers - program the host controller timer registers
 * (SYS1CLK_1US, TX_SYMBOL_CLK_NS_US and, optionally, the link startup timer)
 * according to the given gear, power mode and HS rate.
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer,
		bool is_pre_scale_up)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation / Auto hibern8 logic.
	 */
	if (ufs_qcom_cap_qunipro(host) &&
	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
	       ufshcd_is_auto_hibern8_supported(hba))))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk")) {
			if (is_pre_scale_up)
				core_clk_rate = clki->max_freq;
			else
				core_clk_rate = clk_get_rate(clki->clk);
		}
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
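	/* For example, a 300 MHz core clock yields 300 cycles per microsecond. */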
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* both fields of this register shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer)
{
	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
				     update_link_startup_timer, false);
}

static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 unipro_ver;
	int err = 0;

	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
			__func__);
		err = -EINVAL;
		goto out;
	}

	/* make sure RX LineCfg is enabled before link startup */
	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
	if (err)
		goto out;

	if (ufs_qcom_cap_qunipro(host)) {
		/*
		 * set unipro core clock cycles to 150 & clear clock divider
		 */
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
		if (err)
			goto out;
	}

	err = ufs_qcom_enable_hw_clk_gating(hba);
	if (err)
		goto out;

	/*
	 * Some UFS devices (and maybe the host) have issues if LCC is
	 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
	 * before link startup which will make sure that both host
	 * and device TX LCC are disabled once link startup is
	 * completed.
	 */
	unipro_ver = ufshcd_get_local_unipro_ver(hba);
	if (unipro_ver != UFS_UNIPRO_VER_1_41)
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
				     0);
	if (err)
		goto out;

	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
				SAVECONFIGTIME_MODE_MASK,
				PA_VS_CONFIG_REG1);
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);
		goto out;
	}

	/*
	 * Some UFS devices send incorrect LineCfg data as part of the power
	 * mode change sequence which may cause the host PHY to go into a bad
	 * state. Disabling Rx LineCfg of the host PHY should help avoid this.
	 */
	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
			__func__);
		goto out;
	}

	/*
	 * The UFS controller has a *clk_req output to GCC for each one of the
	 * clocks entering it. When *clk_req for a specific clock is
	 * de-asserted, the corresponding clock from GCC is stopped. The UFS
	 * controller de-asserts the *clk_req outputs when it is in the Auto
	 * Hibernate state only if the Clock request feature is enabled.
	 * Enable the Clock request feature:
	 * - Enable HW clock control for UFS clocks in GCC (handled by the
	 *   clock driver as part of clk_prepare_enable).
	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
				   UFS_HW_CLK_CTRL_EN,
			      UFS_AH8_CFG);
	/*
	 * Make sure clock request feature gets enabled for HW clk gating
	 * before further operations.
	 */
	mb();

out:
	return err;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_qcom_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}


static int ufs_qcom_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	int min_uV, uA_load;

	if (!vreg) {
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	reg = vreg->reg;
	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
					__func__, vreg->name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}

static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (vreg->enabled)
		return ret;

	ret = ufs_qcom_config_vreg(dev, vreg, true);
	if (ret)
		goto out;

	ret = regulator_enable(vreg->reg);
	if (ret)
		goto out;

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg->enabled)
		return ret;

	ret = regulator_disable(vreg->reg);
	if (ret)
		goto out;

	ret = ufs_qcom_config_vreg(dev, vreg, false);
	if (ret)
		goto out;

	vreg->enabled = false;
out:
	return ret;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	/*
	 * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
	 * power rail and low noise analog power rail for PLL can be
	 * switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
			ret = ufs_qcom_disable_vreg(hba->dev,
					host->vddp_ref_clk);
		ufs_qcom_ice_suspend(host);

		if (ufs_qcom_is_link_off(hba)) {
			/* Assert PHY soft reset */
			ufs_qcom_assert_reset(hba);
			goto out;
		}
	}
	/* Unvote PM QoS */
	ufs_qcom_pm_qos_suspend(host);

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
				   hba->spm_lvl > UFS_PM_LVL_3))
		ufs_qcom_enable_vreg(hba->dev,
				     host->vddp_ref_clk);

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	err = ufs_qcom_ice_resume(host);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}

static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
	int ret = -ENOTSUPP;

	if (!hba->core_reset) {
		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
			ret);
		goto out;
	}

	ret = reset_control_assert(hba->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(hba->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

out:
	return ret;
}

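/*
 * Inline Crypto Engine (ICE) hooks. These set up, for each SCSI request, the
 * crypto configuration (key slot index, enable bit and the data unit number
 * derived from the bio sector) so that it is in place before the
 * corresponding transfer request is issued to the device.
 */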
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct request *req;
	int ret;

	if (lrbp->cmd && lrbp->cmd->request)
		req = lrbp->cmd->request;
	else
		return 0;
	/*
	 * Right now ICE does not support a variable dun, but this can be
	 * taken up as a future enhancement:
	 * if (bio_dun(req->bio)) {
	 *	dun @bio can be split, so we have to adjust offset
	 *	*dun = bio_dun(req->bio);
	 * } else
	 */
	if (req->bio) {
		*dun = req->bio->bi_iter.bi_sector;
		*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
	}

	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);

	return ret;
}

static
int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	int err = 0;

	if (!host->ice.pdev ||
	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, struct request *req)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_end(host, req);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev)
		goto out;

	err = ufs_qcom_ice_reset(host);
out:
	return err;
}

static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!status)
		return -EINVAL;

	return ufs_qcom_ice_get_status(host, status);
}
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup		NULL
#define ufs_qcom_crytpo_engine_cfg_start	NULL
#define ufs_qcom_crytpo_engine_cfg_end		NULL
#define ufs_qcom_crytpo_engine_reset		NULL
#define ufs_qcom_crypto_engine_get_status	NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

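/*
 * Negotiate the power mode to be used on the link: take the device's maximum
 * capabilities (dev_max) and clamp them against the vendor limits in
 * qcom_param, writing the agreed gear, lane count, power mode and HS rate
 * into agreed_pwr.
 */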
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences;
	 * the same decision will be made for rx.
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what the gear is, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

#ifdef CONFIG_QCOM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

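/*
 * Build the bus vector name that is matched against qcom,bus-vector-names in
 * device tree: for example "HS_RB_G3_L2" for HS rate B, gear 3, 2 lanes,
 * "PWM_G1_L1" for PWM gear 1 on one lane, or "MIN" when no power mode has
 * been negotiated yet.
 */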
1104static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
1105{
1106 int gear = max_t(u32, p->gear_rx, p->gear_tx);
1107 int lanes = max_t(u32, p->lane_rx, p->lane_tx);
1108 int pwr;
1109
1110 /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
1111 if (!gear)
1112 gear = 1;
1113
1114 if (!lanes)
1115 lanes = 1;
1116
1117 if (!p->pwr_rx && !p->pwr_tx) {
1118 pwr = SLOWAUTO_MODE;
1119 snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
1120 } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
1121 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
1122 pwr = FAST_MODE;
1123 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
1124 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
1125 } else {
1126 pwr = SLOW_MODE;
1127 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
1128 "PWM", gear, lanes);
1129 }
1130}
1131
Subhash Jadavani9c807702017-04-01 00:35:51 -07001132static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001133{
1134 int err = 0;
1135
1136 if (vote != host->bus_vote.curr_vote) {
1137 err = msm_bus_scale_client_update_request(
1138 host->bus_vote.client_handle, vote);
1139 if (err) {
1140 dev_err(host->hba->dev,
1141 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1142 __func__, host->bus_vote.client_handle,
1143 vote, err);
1144 goto out;
1145 }
1146
1147 host->bus_vote.curr_vote = vote;
1148 }
1149out:
1150 return err;
1151}
1152
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001153static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
1154{
1155 int vote;
1156 int err = 0;
1157 char mode[BUS_VECTOR_NAME_LEN];
1158
1159 ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
1160
1161 vote = ufs_qcom_get_bus_vote(host, mode);
1162 if (vote >= 0)
Subhash Jadavani9c807702017-04-01 00:35:51 -07001163 err = __ufs_qcom_set_bus_vote(host, vote);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001164 else
1165 err = vote;
1166
1167 if (err)
1168 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
1169 else
1170 host->bus_vote.saved_vote = vote;
1171 return err;
1172}
1173
Subhash Jadavani9c807702017-04-01 00:35:51 -07001174static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
1175{
1176 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1177 int vote, err;
1178
1179 /*
1180 * In case ufs_qcom_init() is not yet done, simply ignore.
1181 * This ufs_qcom_set_bus_vote() shall be called from
1182 * ufs_qcom_init() after init is done.
1183 */
1184 if (!host)
1185 return 0;
1186
1187 if (on) {
1188 vote = host->bus_vote.saved_vote;
1189 if (vote == host->bus_vote.min_bw_vote)
1190 ufs_qcom_update_bus_bw_vote(host);
1191 } else {
1192 vote = host->bus_vote.min_bw_vote;
1193 }
1194
1195 err = __ufs_qcom_set_bus_vote(host, vote);
1196 if (err)
1197 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1198 __func__, err);
1199
1200 return err;
1201}
1202
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001203static ssize_t
1204show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
1205 char *buf)
1206{
1207 struct ufs_hba *hba = dev_get_drvdata(dev);
1208 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1209
1210 return snprintf(buf, PAGE_SIZE, "%u\n",
1211 host->bus_vote.is_max_bw_needed);
1212}
1213
1214static ssize_t
1215store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
1216 const char *buf, size_t count)
1217{
1218 struct ufs_hba *hba = dev_get_drvdata(dev);
1219 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1220 uint32_t value;
1221
1222 if (!kstrtou32(buf, 0, &value)) {
1223 host->bus_vote.is_max_bw_needed = !!value;
1224 ufs_qcom_update_bus_bw_vote(host);
1225 }
1226
1227 return count;
1228}
1229
1230static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
1231{
1232 int err;
1233 struct msm_bus_scale_pdata *bus_pdata;
1234 struct device *dev = host->hba->dev;
1235 struct platform_device *pdev = to_platform_device(dev);
1236 struct device_node *np = dev->of_node;
1237
1238 bus_pdata = msm_bus_cl_get_pdata(pdev);
1239 if (!bus_pdata) {
1240 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
1241 err = -ENODATA;
1242 goto out;
1243 }
1244
1245 err = of_property_count_strings(np, "qcom,bus-vector-names");
1246 if (err < 0 || err != bus_pdata->num_usecases) {
1247 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
1248 __func__, err);
1249 goto out;
1250 }
1251
1252 host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
1253 if (!host->bus_vote.client_handle) {
1254 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
1255 __func__);
1256 err = -EFAULT;
1257 goto out;
1258 }
1259
1260 /* cache the vote index for minimum and maximum bandwidth */
1261 host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
1262 host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
1263
1264 host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
1265 host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
1266 sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
1267 host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
1268 host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
1269 err = device_create_file(dev, &host->bus_vote.max_bus_bw);
1270out:
1271 return err;
1272}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001273#else /* CONFIG_QCOM_BUS_SCALING */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001274static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
1275{
1276 return 0;
1277}
1278
Subhash Jadavani9c807702017-04-01 00:35:51 -07001279static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001280{
1281 return 0;
1282}
1283
1284static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
1285{
1286 return 0;
1287}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001288static inline void msm_bus_scale_unregister_client(uint32_t cl)
1289{
1290}
1291#endif /* CONFIG_QCOM_BUS_SCALING */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001292
1293static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
1294{
1295 if (host->dev_ref_clk_ctrl_mmio &&
1296 (enable ^ host->is_dev_ref_clk_enabled)) {
1297 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
1298
1299 if (enable)
1300 temp |= host->dev_ref_clk_en_mask;
1301 else
1302 temp &= ~host->dev_ref_clk_en_mask;
1303
1304 /*
1305 * If we are here to disable this clock it might be immediately
1306 * after entering into hibern8 in which case we need to make
1307 * sure that device ref_clk is active at least 1us after the
1308 * hibern8 enter.
1309 */
1310 if (!enable)
1311 udelay(1);
1312
1313 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
1314
1315 /* ensure that ref_clk is enabled/disabled before we return */
1316 wmb();
1317
1318 /*
1319 * If we call hibern8 exit after this, we need to make sure that
1320 * device ref_clk is stable for at least 1us before the hibern8
1321 * exit command.
1322 */
1323 if (enable)
1324 udelay(1);
1325
1326 host->is_dev_ref_clk_enabled = enable;
1327 }
1328}
1329
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001330static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001331 enum ufs_notify_change_status status,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001332 struct ufs_pa_layer_attr *dev_max_params,
1333 struct ufs_pa_layer_attr *dev_req_params)
1334{
1335 u32 val;
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001336 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001337 struct phy *phy = host->generic_phy;
1338 struct ufs_qcom_dev_params ufs_qcom_cap;
1339 int ret = 0;
1340 int res = 0;
1341
1342 if (!dev_req_params) {
1343 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
1344 ret = -EINVAL;
1345 goto out;
1346 }
1347
1348 switch (status) {
1349 case PRE_CHANGE:
1350 ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
1351 ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
1352 ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
1353 ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
1354 ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
1355 ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
1356 ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
1357 ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
1358 ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
1359 ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
1360 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
1361 ufs_qcom_cap.desired_working_mode =
1362 UFS_QCOM_LIMIT_DESIRED_MODE;
1363
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001364 if (host->hw_ver.major == 0x1) {
1365 /*
1366 * HS-G3 operations may not reliably work on legacy QCOM
1367 * UFS host controller hardware even though capability
1368 * exchange during link startup phase may end up
1369 * negotiating maximum supported gear as G3.
1370 * Hence downgrade the maximum supported gear to HS-G2.
1371 */
1372 if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
1373 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
1374 if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
1375 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
1376 }
1377
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001378 ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
1379 dev_max_params,
1380 dev_req_params);
1381 if (ret) {
1382 pr_err("%s: failed to determine capabilities\n",
1383 __func__);
1384 goto out;
1385 }
1386
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001387 /* enable the device ref clock before changing to HS mode */
1388 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
1389 ufshcd_is_hs_mode(dev_req_params))
1390 ufs_qcom_dev_ref_clk_ctrl(host, true);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001391 break;
1392 case POST_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001393 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001394 dev_req_params->pwr_rx,
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001395 dev_req_params->hs_rate, false)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001396 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
1397 __func__);
1398 /*
1399 * we return error code at the end of the routine,
1400 * but continue to configure UFS_PHY_TX_LANE_ENABLE
1401 * and bus voting as usual
1402 */
1403 ret = -EINVAL;
1404 }
1405
1406 val = ~(MAX_U32 << dev_req_params->lane_tx);
1407 res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
1408 if (res) {
1409 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
1410 __func__, res);
1411 ret = res;
1412 }
1413
1414 /* cache the power mode parameters to use internally */
1415 memcpy(&host->dev_req_params,
1416 dev_req_params, sizeof(*dev_req_params));
1417 ufs_qcom_update_bus_bw_vote(host);
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001418
1419 /* disable the device ref clock if entered PWM mode */
1420 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
1421 !ufshcd_is_hs_mode(dev_req_params))
1422 ufs_qcom_dev_ref_clk_ctrl(host, false);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001423 break;
1424 default:
1425 ret = -EINVAL;
1426 break;
1427 }
1428out:
1429 return ret;
1430}
1431
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001432static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
1433{
1434 int err;
1435 u32 pa_vs_config_reg1;
1436
1437 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1438 &pa_vs_config_reg1);
1439 if (err)
1440 goto out;
1441
1442 /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
1443 err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1444 (pa_vs_config_reg1 | (1 << 12)));
1445
1446out:
1447 return err;
1448}
1449
1450static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
1451{
1452 int err = 0;
1453
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08001454 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001455 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
1456
1457 return err;
1458}
1459
Yaniv Gardiae977582015-05-17 18:55:06 +03001460static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
1461{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001462 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardiae977582015-05-17 18:55:06 +03001463
1464 if (host->hw_ver.major == 0x1)
1465 return UFSHCI_VERSION_11;
1466 else
1467 return UFSHCI_VERSION_20;
1468}
1469
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001470/**
1471 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
1472 * @hba: host controller instance
1473 *
1474 * QCOM UFS host controller might have some non standard behaviours (quirks)
1475 * than what is specified by UFSHCI specification. Advertise all such
1476 * quirks to standard UFS host controller driver so standard takes them into
1477 * account.
1478 */
1479static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1480{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001481 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001482
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001483 if (host->hw_ver.major == 0x1) {
1484 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1485 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1486 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001487
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001488 if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
Yaniv Gardi81c7e062015-05-17 18:54:58 +03001489 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001490
1491 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
Yaniv Gardi81c7e062015-05-17 18:54:58 +03001492 }
1493
Subhash Jadavanic04fcdd2016-08-05 11:20:10 -07001494 if (host->hw_ver.major == 0x2) {
Yaniv Gardiae977582015-05-17 18:55:06 +03001495 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
Yaniv Gardi2f018372015-05-17 18:55:00 +03001496
Yaniv Gardicad2e032015-03-31 17:37:14 +03001497 if (!ufs_qcom_cap_qunipro(host))
1498 /* Legacy UniPro mode still need following quirks */
Yaniv Gardi81637432015-05-17 18:55:02 +03001499 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
Yaniv Gardi2c0cc2e2015-05-17 18:55:04 +03001500 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
Yaniv Gardi81637432015-05-17 18:55:02 +03001501 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001502 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001503
1504 if (host->disable_lpm)
1505 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
Yaniv Gardicad2e032015-03-31 17:37:14 +03001506}
1507
1508static void ufs_qcom_set_caps(struct ufs_hba *hba)
1509{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001510 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001511
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001512 if (!host->disable_lpm) {
1513 hba->caps |= UFSHCD_CAP_CLK_GATING;
1514 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1515 hba->caps |= UFSHCD_CAP_CLK_SCALING;
1516 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001517 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001518
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001519 if (host->hw_ver.major >= 0x2) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001520 if (!host->disable_lpm)
1521 hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001522 host->caps = UFS_QCOM_CAP_QUNIPRO |
1523 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001524 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001525 if (host->hw_ver.major >= 0x3) {
1526 host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
1527 /*
1528 * The UFS PHY attached to v3.0.0 controller supports entering
1529 * deeper low power state of SVS2. This lets the controller
1530 * run at much lower clock frequencies for saving power.
1531 * Assuming this and any future revisions of the controller
1532 * support this capability. Need to revist this assumption if
1533 * any future platform with this core doesn't support the
1534 * capability, as there will be no benefit running at lower
1535 * frequencies then.
1536 */
1537 host->caps |= UFS_QCOM_CAP_SVS2;
1538 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001539}
1540
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001541/**
1542 * ufs_qcom_setup_clocks - enables/disable clocks
1543 * @hba: host controller instance
1544 * @on: If true, enable clocks else disable them.
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001545 * @is_gating_context: If true then it means this function is called from
1546 * aggressive clock gating context and we may only need to gate off important
1547 * clocks. If false then make sure to gate off all clocks.
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001548 *
1549 * Returns 0 on success, non-zero on failure.
1550 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001551static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1552 bool is_gating_context)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001553{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001554 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001555 int err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001556
1557 /*
1558 * In case ufs_qcom_init() is not yet done, simply ignore.
1559 * This ufs_qcom_setup_clocks() shall be called from
1560 * ufs_qcom_init() after init is done.
1561 */
1562 if (!host)
1563 return 0;
1564
1565 if (on) {
1566 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
1567 if (err)
1568 goto out;
1569
1570 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
1571 if (err) {
1572 dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
1573 __func__, err);
1574 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1575 goto out;
1576 }
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001577 /* enable the device ref clock for HS mode*/
1578 if (ufshcd_is_hs_mode(&hba->pwr_info))
1579 ufs_qcom_dev_ref_clk_ctrl(host, true);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001580
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001581 err = ufs_qcom_ice_resume(host);
1582 if (err)
1583 goto out;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001584 } else {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001585 err = ufs_qcom_ice_suspend(host);
1586 if (err)
1587 goto out;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001588
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001589 /* M-PHY RMMI interface clocks can be turned off */
1590 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
Subhash Jadavani9c807702017-04-01 00:35:51 -07001591 /*
1592 * If auto hibern8 is supported then the link will already
1593 * be in hibern8 state and the ref clock can be gated.
1594 */
1595 if (ufshcd_is_auto_hibern8_supported(hba) ||
1596 !ufs_qcom_is_link_active(hba)) {
1597 /* turn off UFS local PHY ref_clk */
1598 ufs_qcom_phy_disable_ref_clk(host->generic_phy);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001599 /* disable device ref_clk */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001600 ufs_qcom_dev_ref_clk_ctrl(host, false);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001601 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001602 }
1603
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001604out:
1605 return err;
1606}
1607
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001608#ifdef CONFIG_SMP /* CONFIG_SMP */
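/*
 * Map a CPU number to the index of the PM QoS CPU group whose mask contains
 * it. If the CPU is out of range or belongs to no configured group, fall
 * back to the default index read from DT (qcom,pm-qos-default-cpu).
 */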
1609static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
1610{
1611 int i;
1612
1613 if (cpu >= 0 && cpu < num_possible_cpus())
1614 for (i = 0; i < host->pm_qos.num_groups; i++)
1615 if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
1616 return i;
1617
1618 return host->pm_qos.default_cpu;
1619}
1620
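/*
 * Called when a block request starts: account the request against the PM QoS
 * CPU group serving the request's CPU and, if that group is not already
 * voted (or about to be), schedule the vote work that applies the group's
 * CPU DMA latency constraint.
 */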
1621static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
1622{
1623 unsigned long flags;
1624 struct ufs_qcom_host *host;
1625 struct ufs_qcom_pm_qos_cpu_group *group;
1626
1627 if (!hba || !req)
1628 return;
1629
1630 host = ufshcd_get_variant(hba);
1631 if (!host->pm_qos.groups)
1632 return;
1633
1634 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
1635
1636 spin_lock_irqsave(hba->host->host_lock, flags);
1637 if (!host->pm_qos.is_enabled)
1638 goto out;
1639
1640 group->active_reqs++;
1641 if (group->state != PM_QOS_REQ_VOTE &&
1642 group->state != PM_QOS_VOTED) {
1643 group->state = PM_QOS_REQ_VOTE;
1644 queue_work(host->pm_qos.workq, &group->vote_work);
1645 }
1646out:
1647 spin_unlock_irqrestore(hba->host->host_lock, flags);
1648}
1649
1650/* hba->host->host_lock is assumed to be held by caller */
1651static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
1652{
1653 struct ufs_qcom_pm_qos_cpu_group *group;
1654
1655 if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
1656 return;
1657
1658 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
1659
1660 if (--group->active_reqs)
1661 return;
1662 group->state = PM_QOS_REQ_UNVOTE;
1663 queue_work(host->pm_qos.workq, &group->unvote_work);
1664}
1665
1666static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
1667 bool should_lock)
1668{
1669 unsigned long flags = 0;
1670
1671 if (!hba || !req)
1672 return;
1673
1674 if (should_lock)
1675 spin_lock_irqsave(hba->host->host_lock, flags);
1676 __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
1677 if (should_lock)
1678 spin_unlock_irqrestore(hba->host->host_lock, flags);
1679}
1680
1681static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
1682{
1683 struct ufs_qcom_pm_qos_cpu_group *group =
1684 container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
1685 struct ufs_qcom_host *host = group->host;
1686 unsigned long flags;
1687
1688 spin_lock_irqsave(host->hba->host->host_lock, flags);
1689
1690 if (!host->pm_qos.is_enabled || !group->active_reqs) {
1691 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1692 return;
1693 }
1694
1695 group->state = PM_QOS_VOTED;
1696 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1697
1698 pm_qos_update_request(&group->req, group->latency_us);
1699}
1700
1701static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
1702{
1703 struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
1704 struct ufs_qcom_pm_qos_cpu_group, unvote_work);
1705 struct ufs_qcom_host *host = group->host;
1706 unsigned long flags;
1707
1708 /*
1709 * Check if new requests were submitted in the meantime and do not
1710 * unvote if so.
1711 */
1712 spin_lock_irqsave(host->hba->host->host_lock, flags);
1713
1714 if (!host->pm_qos.is_enabled || group->active_reqs) {
1715 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1716 return;
1717 }
1718
1719 group->state = PM_QOS_UNVOTED;
1720 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1721
1722 pm_qos_update_request_timeout(&group->req,
1723 group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
1724}
1725
1726static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
1727 struct device_attribute *attr, char *buf)
1728{
1729 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1730 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1731
1732 return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
1733}
1734
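/*
 * Illustrative usage of the "pm_qos_enable" attribute created below. The
 * exact sysfs path depends on the platform device name and is an assumption
 * here, not taken from this file:
 *
 *	echo 0 > /sys/devices/platform/.../pm_qos_enable	(disable PM QoS voting)
 *	echo 1 > /sys/devices/platform/.../pm_qos_enable	(enable PM QoS voting)
 */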
1735static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
1736 struct device_attribute *attr, const char *buf, size_t count)
1737{
1738 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1739 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1740 unsigned long value;
1741 unsigned long flags;
1742 bool enable;
1743 int i;
1744
1745 if (kstrtoul(buf, 0, &value))
1746 return -EINVAL;
1747
1748 enable = !!value;
1749
1750 /*
1751 * Must take the spinlock and save irqs before changing the enabled
1752 * flag in order to keep correctness of PM QoS release.
1753 */
1754 spin_lock_irqsave(hba->host->host_lock, flags);
1755 if (enable == host->pm_qos.is_enabled) {
1756 spin_unlock_irqrestore(hba->host->host_lock, flags);
1757 return count;
1758 }
1759 host->pm_qos.is_enabled = enable;
1760 spin_unlock_irqrestore(hba->host->host_lock, flags);
1761
1762 if (!enable)
1763 for (i = 0; i < host->pm_qos.num_groups; i++) {
1764 cancel_work_sync(&host->pm_qos.groups[i].vote_work);
1765 cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
1766 spin_lock_irqsave(hba->host->host_lock, flags);
1767 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1768 host->pm_qos.groups[i].active_reqs = 0;
1769 spin_unlock_irqrestore(hba->host->host_lock, flags);
1770 pm_qos_update_request(&host->pm_qos.groups[i].req,
1771 PM_QOS_DEFAULT_VALUE);
1772 }
1773
1774 return count;
1775}
1776
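/*
 * Example of the output produced by the "pm_qos_latency_us" show handler
 * below (group masks and latency values are illustrative, not taken from
 * this file):
 *
 *	cpu group #0(mask=0xf): 200
 *	cpu group #1(mask=0xf0): 300
 */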
1777static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
1778 struct device_attribute *attr, char *buf)
1779{
1780 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1781 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1782 int ret;
1783 int i;
1784 int offset = 0;
1785
1786 for (i = 0; i < host->pm_qos.num_groups; i++) {
 1787		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
1788 "cpu group #%d(mask=0x%lx): %d\n", i,
1789 host->pm_qos.groups[i].mask.bits[0],
1790 host->pm_qos.groups[i].latency_us);
1791 if (ret > 0)
1792 offset += ret;
1793 else
1794 break;
1795 }
1796
1797 return offset;
1798}
1799
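/*
 * The store handler below accepts one comma-separated latency value (in
 * microseconds) per configured CPU group. Illustrative usage, with an
 * assumed sysfs path and assumed values:
 *
 *	echo "100,250" > /sys/devices/platform/.../pm_qos_latency_us
 */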
1800static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
1801 struct device_attribute *attr, const char *buf, size_t count)
1802{
1803 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1804 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1805 unsigned long value;
1806 unsigned long flags;
1807 char *strbuf;
1808 char *strbuf_copy;
1809 char *token;
1810 int i;
1811 int ret;
1812
1813 /* reserve one byte for null termination */
1814 strbuf = kmalloc(count + 1, GFP_KERNEL);
1815 if (!strbuf)
1816 return -ENOMEM;
1817 strbuf_copy = strbuf;
1818 strlcpy(strbuf, buf, count + 1);
1819
1820 for (i = 0; i < host->pm_qos.num_groups; i++) {
1821 token = strsep(&strbuf, ",");
1822 if (!token)
1823 break;
1824
1825 ret = kstrtoul(token, 0, &value);
1826 if (ret)
1827 break;
1828
1829 spin_lock_irqsave(hba->host->host_lock, flags);
1830 host->pm_qos.groups[i].latency_us = value;
1831 spin_unlock_irqrestore(hba->host->host_lock, flags);
1832 }
1833
1834 kfree(strbuf_copy);
1835 return count;
1836}
1837
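/*
 * ufs_qcom_pm_qos_init() reads the PM QoS CPU groups and their latencies
 * from DT. A minimal illustrative snippet for the host controller node
 * (mask and latency values are assumptions, not taken from this file):
 *
 *	qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
 *	qcom,pm-qos-cpu-group-latency-us = <200 300>;
 *	qcom,pm-qos-default-cpu = <0>;
 */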
1838static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
1839{
1840 struct device_node *node = host->hba->dev->of_node;
1841 struct device_attribute *attr;
1842 int ret = 0;
1843 int num_groups;
1844 int num_values;
1845 char wq_name[sizeof("ufs_pm_qos_00")];
1846 int i;
1847
1848 num_groups = of_property_count_u32_elems(node,
1849 "qcom,pm-qos-cpu-groups");
1850 if (num_groups <= 0)
1851 goto no_pm_qos;
1852
1853 num_values = of_property_count_u32_elems(node,
1854 "qcom,pm-qos-cpu-group-latency-us");
1855 if (num_values <= 0)
1856 goto no_pm_qos;
1857
1858 if (num_values != num_groups || num_groups > num_possible_cpus()) {
1859 dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
1860 __func__, num_groups, num_values, num_possible_cpus());
1861 goto no_pm_qos;
1862 }
1863
1864 host->pm_qos.num_groups = num_groups;
1865 host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
1866 sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
1867 if (!host->pm_qos.groups)
1868 return -ENOMEM;
1869
1870 for (i = 0; i < host->pm_qos.num_groups; i++) {
1871 u32 mask;
1872
1873 ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
1874 i, &mask);
1875 if (ret)
1876 goto free_groups;
1877 host->pm_qos.groups[i].mask.bits[0] = mask;
1878 if (!cpumask_subset(&host->pm_qos.groups[i].mask,
1879 cpu_possible_mask)) {
1880 dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
1881 __func__, mask);
1882 goto free_groups;
1883 }
1884
1885 ret = of_property_read_u32_index(node,
1886 "qcom,pm-qos-cpu-group-latency-us", i,
1887 &host->pm_qos.groups[i].latency_us);
1888 if (ret)
1889 goto free_groups;
1890
1891 host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
1892 host->pm_qos.groups[i].req.cpus_affine =
1893 host->pm_qos.groups[i].mask;
1894 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1895 host->pm_qos.groups[i].active_reqs = 0;
1896 host->pm_qos.groups[i].host = host;
1897
1898 INIT_WORK(&host->pm_qos.groups[i].vote_work,
1899 ufs_qcom_pm_qos_vote_work);
1900 INIT_WORK(&host->pm_qos.groups[i].unvote_work,
1901 ufs_qcom_pm_qos_unvote_work);
1902 }
1903
1904 ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
1905 &host->pm_qos.default_cpu);
 1906	if (ret || host->pm_qos.default_cpu >= num_possible_cpus())
1907 host->pm_qos.default_cpu = 0;
1908
1909 /*
 1910	 * Use a single-threaded workqueue to ensure work submitted to the queue
1911 * is performed in order. Consider the following 2 possible cases:
1912 *
1913 * 1. A new request arrives and voting work is scheduled for it. Before
1914 * the voting work is performed the request is finished and unvote
1915 * work is also scheduled.
1916 * 2. A request is finished and unvote work is scheduled. Before the
1917 * work is performed a new request arrives and voting work is also
1918 * scheduled.
1919 *
1920 * In both cases a vote work and unvote work wait to be performed.
1921 * If ordering is not guaranteed, then the end state might be the
1922 * opposite of the desired state.
1923 */
1924 snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
1925 host->hba->host->host_no);
1926 host->pm_qos.workq = create_singlethread_workqueue(wq_name);
1927 if (!host->pm_qos.workq) {
1928 dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
1929 __func__);
1930 ret = -ENOMEM;
1931 goto free_groups;
1932 }
1933
1934 /* Initialization was ok, add all PM QoS requests */
1935 for (i = 0; i < host->pm_qos.num_groups; i++)
1936 pm_qos_add_request(&host->pm_qos.groups[i].req,
1937 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
1938
1939 /* PM QoS latency sys-fs attribute */
1940 attr = &host->pm_qos.latency_attr;
1941 attr->show = ufs_qcom_pm_qos_latency_show;
1942 attr->store = ufs_qcom_pm_qos_latency_store;
1943 sysfs_attr_init(&attr->attr);
1944 attr->attr.name = "pm_qos_latency_us";
1945 attr->attr.mode = S_IRUGO | S_IWUSR;
1946 if (device_create_file(host->hba->var->dev, attr))
1947 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
1948
1949 /* PM QoS enable sys-fs attribute */
1950 attr = &host->pm_qos.enable_attr;
1951 attr->show = ufs_qcom_pm_qos_enable_show;
1952 attr->store = ufs_qcom_pm_qos_enable_store;
1953 sysfs_attr_init(&attr->attr);
1954 attr->attr.name = "pm_qos_enable";
1955 attr->attr.mode = S_IRUGO | S_IWUSR;
1956 if (device_create_file(host->hba->var->dev, attr))
1957 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
1958
1959 host->pm_qos.is_enabled = true;
1960
1961 return 0;
1962
1963free_groups:
1964 kfree(host->pm_qos.groups);
1965no_pm_qos:
1966 host->pm_qos.groups = NULL;
1967 return ret ? ret : -ENOTSUPP;
1968}
1969
1970static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
1971{
1972 int i;
1973
1974 if (!host->pm_qos.groups)
1975 return;
1976
1977 for (i = 0; i < host->pm_qos.num_groups; i++)
1978 flush_work(&host->pm_qos.groups[i].unvote_work);
1979}
1980
1981static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
1982{
1983 int i;
1984
1985 if (!host->pm_qos.groups)
1986 return;
1987
1988 for (i = 0; i < host->pm_qos.num_groups; i++)
1989 pm_qos_remove_request(&host->pm_qos.groups[i].req);
1990 destroy_workqueue(host->pm_qos.workq);
1991
1992 kfree(host->pm_qos.groups);
1993 host->pm_qos.groups = NULL;
1994}
1995#endif /* CONFIG_SMP */
1996
Asutosh Das134636a2017-06-07 11:47:42 +05301997#define ANDROID_BOOT_DEV_MAX 30
1998static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
1999
2000#ifndef MODULE
2001static int __init get_android_boot_dev(char *str)
2002{
2003 strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
2004 return 1;
2005}
2006__setup("androidboot.bootdevice=", get_android_boot_dev);
2007#endif
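/*
 * The "androidboot.bootdevice=" kernel command line parameter parsed above
 * carries the boot storage device name passed by the bootloader, e.g.
 * (device name is an assumption, not taken from this file):
 *
 *	androidboot.bootdevice=1d84000.ufshc
 */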
2008
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002009/*
2010 * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
2011 */
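/*
 * "qcom,disable-lpm" is a simple boolean flag in the UFS host controller DT
 * node, e.g. (illustrative):
 *
 *	qcom,disable-lpm;
 */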
2012static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
2013{
2014 struct device_node *node = host->hba->dev->of_node;
2015
2016 host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
2017 if (host->disable_lpm)
2018 pr_info("%s: will disable all LPM modes\n", __func__);
2019}
2020
Subhash Jadavania889db02016-12-09 10:24:58 -08002021static void ufs_qcom_save_host_ptr(struct ufs_hba *hba)
2022{
2023 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2024 int id;
2025
2026 if (!hba->dev->of_node)
2027 return;
2028
2029 /* Extract platform data */
2030 id = of_alias_get_id(hba->dev->of_node, "ufshc");
2031 if (id <= 0)
2032 dev_err(hba->dev, "Failed to get host index %d\n", id);
2033 else if (id <= MAX_UFS_QCOM_HOSTS)
2034 ufs_qcom_hosts[id - 1] = host;
2035 else
2036 dev_err(hba->dev, "invalid host index %d\n", id);
2037}
2038
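/*
 * ufs_qcom_parse_reg_info() looks up "<name>-supply" and "<name>-max-microamp"
 * properties in the host DT node. For the "qcom,vddp-ref-clk" supply used
 * during init, an illustrative snippet (the regulator phandle and current
 * limit below are assumptions, not taken from this file):
 *
 *	qcom,vddp-ref-clk-supply = <&pm_l25>;
 *	qcom,vddp-ref-clk-max-microamp = <100>;
 */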
Subhash Jadavani9c807702017-04-01 00:35:51 -07002039static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
2040 struct ufs_vreg **out_vreg)
2041{
2042 int ret = 0;
2043 char prop_name[MAX_PROP_SIZE];
2044 struct ufs_vreg *vreg = NULL;
2045 struct device *dev = host->hba->dev;
2046 struct device_node *np = dev->of_node;
2047
2048 if (!np) {
2049 dev_err(dev, "%s: non DT initialization\n", __func__);
2050 goto out;
2051 }
2052
2053 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
2054 if (!of_parse_phandle(np, prop_name, 0)) {
2055 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
2056 __func__, prop_name);
2057 ret = -ENODEV;
2058 goto out;
2059 }
2060
2061 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
2062 if (!vreg)
2063 return -ENOMEM;
2064
2065 vreg->name = name;
2066
2067 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
2068 ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
2069 if (ret) {
2070 dev_err(dev, "%s: unable to find %s err %d\n",
2071 __func__, prop_name, ret);
2072 goto out;
2073 }
2074
2075 vreg->reg = devm_regulator_get(dev, vreg->name);
2076 if (IS_ERR(vreg->reg)) {
2077 ret = PTR_ERR(vreg->reg);
2078 dev_err(dev, "%s: %s get failed, err=%d\n",
2079 __func__, vreg->name, ret);
2080 }
2081 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
2082 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
2083
2084out:
2085 if (!ret)
2086 *out_vreg = vreg;
2087 return ret;
2088}
2089
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002090/**
2091 * ufs_qcom_init - bind phy with controller
2092 * @hba: host controller instance
2093 *
2094 * Binds PHY with controller and powers up PHY enabling clocks
2095 * and regulators.
2096 *
2097 * Returns -EPROBE_DEFER if binding fails, returns negative error
2098 * on phy power up failure and returns zero on success.
2099 */
2100static int ufs_qcom_init(struct ufs_hba *hba)
2101{
2102 int err;
2103 struct device *dev = hba->dev;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002104 struct platform_device *pdev = to_platform_device(dev);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002105 struct ufs_qcom_host *host;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002106 struct resource *res;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002107
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002108 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2109 if (!host) {
2110 err = -ENOMEM;
2111 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
2112 goto out;
2113 }
2114
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002115 /* Make a two way bind between the qcom host and the hba */
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002116 host->hba = hba;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002117 spin_lock_init(&host->ice_work_lock);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002118
Subhash Jadavani9c807702017-04-01 00:35:51 -07002119 ufshcd_set_variant(hba, host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002120
2121 err = ufs_qcom_ice_get_dev(host);
2122 if (err == -EPROBE_DEFER) {
2123 /*
2124 * UFS driver might be probed before ICE driver does.
2125 * In that case we would like to return EPROBE_DEFER code
2126 * in order to delay its probing.
2127 */
 2128		dev_err(dev, "%s: required ICE device not probed yet, err = %d\n",
2129 __func__, err);
2130 goto out_host_free;
2131
2132 } else if (err == -ENODEV) {
2133 /*
2134 * ICE device is not enabled in DTS file. No need for further
2135 * initialization of ICE driver.
2136 */
 2137		dev_warn(dev, "%s: ICE device is not enabled\n",
2138 __func__);
2139 } else if (err) {
2140 dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
2141 __func__, err);
2142 goto out_host_free;
Neeraj Soni36c65122018-04-18 21:04:46 +05302143 } else {
2144 hba->host->inlinecrypt_support = 1;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002145 }
2146
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002147 host->generic_phy = devm_phy_get(dev, "ufsphy");
2148
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002149 if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
2150 /*
2151 * UFS driver might be probed before the phy driver does.
2152 * In that case we would like to return EPROBE_DEFER code.
2153 */
2154 err = -EPROBE_DEFER;
 2155		dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
2156 __func__, err);
2157 goto out_host_free;
2158 } else if (IS_ERR(host->generic_phy)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002159 err = PTR_ERR(host->generic_phy);
2160 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
2161 goto out;
2162 }
2163
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002164 err = ufs_qcom_pm_qos_init(host);
2165 if (err)
2166 dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
2167
2168 /* restore the secure configuration */
2169 ufs_qcom_update_sec_cfg(hba, true);
2170
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002171 err = ufs_qcom_bus_register(host);
2172 if (err)
2173 goto out_host_free;
2174
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002175 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
2176 &host->hw_ver.minor, &host->hw_ver.step);
2177
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002178 /*
2179 * for newer controllers, device reference clock control bit has
2180 * moved inside UFS controller register address space itself.
2181 */
2182 if (host->hw_ver.major >= 0x02) {
2183 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
2184 host->dev_ref_clk_en_mask = BIT(26);
2185 } else {
2186 /* "dev_ref_clk_ctrl_mem" is optional resource */
2187 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2188 if (res) {
2189 host->dev_ref_clk_ctrl_mmio =
2190 devm_ioremap_resource(dev, res);
2191 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
2192 dev_warn(dev,
2193 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
2194 __func__,
2195 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
2196 host->dev_ref_clk_ctrl_mmio = NULL;
2197 }
2198 host->dev_ref_clk_en_mask = BIT(5);
2199 }
2200 }
2201
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002202 /* update phy revision information before calling phy_init() */
2203 ufs_qcom_phy_save_controller_version(host->generic_phy,
2204 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
2205
Subhash Jadavani9c807702017-04-01 00:35:51 -07002206 err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
2207 &host->vddp_ref_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002208 phy_init(host->generic_phy);
2209 err = phy_power_on(host->generic_phy);
2210 if (err)
2211 goto out_unregister_bus;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002212 if (host->vddp_ref_clk) {
2213 err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
2214 if (err) {
2215 dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
2216 __func__, err);
2217 goto out_disable_phy;
2218 }
2219 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002220
2221 err = ufs_qcom_init_lane_clks(host);
2222 if (err)
Subhash Jadavani9c807702017-04-01 00:35:51 -07002223 goto out_disable_vddp;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002224
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002225 ufs_qcom_parse_lpm(host);
2226 if (host->disable_lpm)
2227 pm_runtime_forbid(host->hba->dev);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002228 ufs_qcom_set_caps(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002229 ufs_qcom_advertise_quirks(hba);
2230
Subhash Jadavani9c807702017-04-01 00:35:51 -07002231 ufs_qcom_set_bus_vote(hba, true);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002232 ufs_qcom_setup_clocks(hba, true, false);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002233
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002234 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
2235 ufs_qcom_get_default_testbus_cfg(host);
2236 err = ufs_qcom_testbus_config(host);
2237 if (err) {
2238 dev_warn(dev, "%s: failed to configure the testbus %d\n",
2239 __func__, err);
2240 err = 0;
2241 }
2242
Subhash Jadavania889db02016-12-09 10:24:58 -08002243 ufs_qcom_save_host_ptr(hba);
2244
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002245 goto out;
2246
Subhash Jadavani9c807702017-04-01 00:35:51 -07002247out_disable_vddp:
2248 if (host->vddp_ref_clk)
2249 ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002250out_disable_phy:
2251 phy_power_off(host->generic_phy);
2252out_unregister_bus:
2253 phy_exit(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002254 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002255out_host_free:
2256 devm_kfree(dev, host);
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002257 ufshcd_set_variant(hba, NULL);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002258out:
2259 return err;
2260}
2261
2262static void ufs_qcom_exit(struct ufs_hba *hba)
2263{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002264 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002265
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002266 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002267 ufs_qcom_disable_lane_clks(host);
2268 phy_power_off(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002269 ufs_qcom_pm_qos_remove(host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002270}
2271
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002272static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
2273 u32 clk_cycles)
2274{
2275 int err;
2276 u32 core_clk_ctrl_reg;
2277
2278 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
2279 return -EINVAL;
2280
2281 err = ufshcd_dme_get(hba,
2282 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2283 &core_clk_ctrl_reg);
2284 if (err)
2285 goto out;
2286
2287 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
2288 core_clk_ctrl_reg |= clk_cycles;
2289
2290 /* Clear CORE_CLK_DIV_EN */
2291 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2292
2293 err = ufshcd_dme_set(hba,
2294 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2295 core_clk_ctrl_reg);
2296out:
2297 return err;
2298}
2299
2300static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
2301{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002302 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002303 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2304 int err = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002305
2306 if (!ufs_qcom_cap_qunipro(host))
Subhash Jadavani9c807702017-04-01 00:35:51 -07002307 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002308
Subhash Jadavani9c807702017-04-01 00:35:51 -07002309 if (attr)
2310 __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2311 attr->hs_rate, false, true);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002312
2313 /* set unipro core clock cycles to 150 and clear clock divider */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002314 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
2315out:
2316 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002317}
2318
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002319static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
2320{
2321 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002322 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002323 int err = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002324
2325 if (!ufs_qcom_cap_qunipro(host))
2326 return 0;
2327
Subhash Jadavani9c807702017-04-01 00:35:51 -07002328 if (attr)
2329 ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2330 attr->hs_rate, false);
2331
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002332 if (ufs_qcom_cap_svs2(host))
2333 /*
2334 * For SVS2 set unipro core clock cycles to 37 and
2335 * clear clock divider
2336 */
2337 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
2338 else
2339 /*
2340 * For SVS set unipro core clock cycles to 75 and
2341 * clear clock divider
2342 */
2343 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
2344
2345 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002346}
2347
2348static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
2349 bool scale_up, enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002350{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002351 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002352 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002353
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002354 switch (status) {
2355 case PRE_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002356 if (scale_up)
2357 err = ufs_qcom_clk_scale_up_pre_change(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002358 break;
2359 case POST_CHANGE:
Subhash Jadavani9c807702017-04-01 00:35:51 -07002360 if (!scale_up)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002361 err = ufs_qcom_clk_scale_down_post_change(hba);
2362
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002363 ufs_qcom_update_bus_bw_vote(host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002364 break;
2365 default:
2366 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
2367 err = -EINVAL;
2368 break;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002369 }
2370
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002371 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002372}
2373
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002374/*
2375 * This function should be called to restore the security configuration of UFS
2376 * register space after coming out of UFS host core power collapse.
2377 *
2378 * @hba: host controller instance
2379 * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2380 * and set "false" when secure configuration is lost.
2381 */
2382static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
2383{
2384 return 0;
2385}
2386
2387
2388static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
2389{
2390 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2391
2392 if (ufs_qcom_cap_svs2(host))
2393 return UFS_HS_G1;
 2394	/* Default SVS support @ HS G2 frequencies */
2395 return UFS_HS_G2;
2396}
2397
2398void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
2399 void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
2400 char *str, void *priv))
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002401{
2402 u32 reg;
2403 struct ufs_qcom_host *host;
2404
2405 if (unlikely(!hba)) {
2406 pr_err("%s: hba is NULL\n", __func__);
2407 return;
2408 }
2409 if (unlikely(!print_fn)) {
2410 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
2411 return;
2412 }
2413
2414 host = ufshcd_get_variant(hba);
2415 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
2416 return;
2417
2418 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
2419 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
2420
2421 reg = ufshcd_readl(hba, REG_UFS_CFG1);
2422 reg |= UFS_BIT(17);
2423 ufshcd_writel(hba, reg, REG_UFS_CFG1);
2424
2425 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
2426 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
2427
2428 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
2429 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
2430
2431 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
2432 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
2433
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002434 /* clear bit 17 - UTP_DBG_RAMS_EN */
2435 ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002436
2437 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
2438 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
2439
2440 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
2441 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
2442
2443 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
2444 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
2445
2446 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
2447 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
2448
2449 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
2450 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
2451
2452 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
2453 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
2454
2455 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
2456 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
2457}
2458
2459static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
2460{
Subhash Jadavani9c807702017-04-01 00:35:51 -07002461 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
2462 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
2463 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002464 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002465 } else {
2466 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002467 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002468 }
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002469}
2470
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002471static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
2472{
2473 /* provide a legal default configuration */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002474 host->testbus.select_major = TSTBUS_UNIPRO;
2475 host->testbus.select_minor = 37;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002476}
2477
Sayali Lokhande6db52742017-10-04 11:56:14 +05302478bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
2479 u8 select_major, u8 select_minor)
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002480{
Sayali Lokhande6db52742017-10-04 11:56:14 +05302481 if (select_major >= TSTBUS_MAX) {
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002482 dev_err(host->hba->dev,
 2483			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
Sayali Lokhande6db52742017-10-04 11:56:14 +05302484 __func__, select_major);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002485 return false;
2486 }
2487
2488 /*
2489 * Not performing check for each individual select_major
2490 * mappings of select_minor, since there is no harm in
2491 * configuring a non-existent select_minor
2492 */
Sayali Lokhande6db52742017-10-04 11:56:14 +05302493 if (select_minor > 0xFF) {
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002494 dev_err(host->hba->dev,
2495 "%s: 0x%05X is not a legal testbus option\n",
Sayali Lokhande6db52742017-10-04 11:56:14 +05302496 __func__, select_minor);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002497 return false;
2498 }
2499
2500 return true;
2501}
2502
Subhash Jadavani9c807702017-04-01 00:35:51 -07002503/*
2504 * The caller of this function must make sure that the controller
2505 * is out of runtime suspend and appropriate clocks are enabled
2506 * before accessing.
2507 */
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002508int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
2509{
Sayali Lokhande6db52742017-10-04 11:56:14 +05302510 int reg = 0;
Asutosh Dasf7c5b032018-04-13 14:03:36 +05302511 int offset = -1, ret = 0, testbus_sel_offset = 19;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002512 u32 mask = TEST_BUS_SUB_SEL_MASK;
Sayali Lokhande6db52742017-10-04 11:56:14 +05302513 unsigned long flags;
2514 struct ufs_hba *hba;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002515
2516 if (!host)
2517 return -EINVAL;
Sayali Lokhande6db52742017-10-04 11:56:14 +05302518 hba = host->hba;
2519 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002520 switch (host->testbus.select_major) {
2521 case TSTBUS_UAWM:
2522 reg = UFS_TEST_BUS_CTRL_0;
2523 offset = 24;
2524 break;
2525 case TSTBUS_UARM:
2526 reg = UFS_TEST_BUS_CTRL_0;
2527 offset = 16;
2528 break;
2529 case TSTBUS_TXUC:
2530 reg = UFS_TEST_BUS_CTRL_0;
2531 offset = 8;
2532 break;
2533 case TSTBUS_RXUC:
2534 reg = UFS_TEST_BUS_CTRL_0;
2535 offset = 0;
2536 break;
2537 case TSTBUS_DFC:
2538 reg = UFS_TEST_BUS_CTRL_1;
2539 offset = 24;
2540 break;
2541 case TSTBUS_TRLUT:
2542 reg = UFS_TEST_BUS_CTRL_1;
2543 offset = 16;
2544 break;
2545 case TSTBUS_TMRLUT:
2546 reg = UFS_TEST_BUS_CTRL_1;
2547 offset = 8;
2548 break;
2549 case TSTBUS_OCSC:
2550 reg = UFS_TEST_BUS_CTRL_1;
2551 offset = 0;
2552 break;
2553 case TSTBUS_WRAPPER:
2554 reg = UFS_TEST_BUS_CTRL_2;
2555 offset = 16;
2556 break;
2557 case TSTBUS_COMBINED:
2558 reg = UFS_TEST_BUS_CTRL_2;
2559 offset = 8;
2560 break;
2561 case TSTBUS_UTP_HCI:
2562 reg = UFS_TEST_BUS_CTRL_2;
2563 offset = 0;
2564 break;
2565 case TSTBUS_UNIPRO:
2566 reg = UFS_UNIPRO_CFG;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002567 offset = 20;
2568 mask = 0xFFF;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002569 break;
2570 /*
2571 * No need for a default case, since
2572 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
2573 * is legal
2574 */
2575 }
Asutosh Dasf7c5b032018-04-13 14:03:36 +05302576 if (offset < 0) {
2577 dev_err(hba->dev, "%s: Bad offset: %d\n", __func__, offset);
2578 ret = -EINVAL;
2579 spin_unlock_irqrestore(hba->host->host_lock, flags);
2580 goto out;
2581 }
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002582 mask <<= offset;
2583
Sayali Lokhande6db52742017-10-04 11:56:14 +05302584 spin_unlock_irqrestore(hba->host->host_lock, flags);
2585 if (reg) {
2586 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
2587 (u32)host->testbus.select_major << testbus_sel_offset,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002588 REG_UFS_CFG1);
Sayali Lokhande6db52742017-10-04 11:56:14 +05302589 ufshcd_rmwl(host->hba, mask,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002590 (u32)host->testbus.select_minor << offset,
2591 reg);
Sayali Lokhande6db52742017-10-04 11:56:14 +05302592 } else {
2593 dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
2594 ret = -EINVAL;
2595 goto out;
2596 }
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002597 ufs_qcom_enable_test_bus(host);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002598 /*
2599 * Make sure the test bus configuration is
2600 * committed before returning.
2601 */
2602 mb();
Sayali Lokhande6db52742017-10-04 11:56:14 +05302603out:
2604 return ret;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002605}
2606
2607static void ufs_qcom_testbus_read(struct ufs_hba *hba)
2608{
2609 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
2610}
2611
Subhash Jadavani9c807702017-04-01 00:35:51 -07002612static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002613{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002614 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002615 u32 *testbus = NULL;
2616 int i, nminor = 256, testbus_len = nminor * sizeof(u32);
2617
2618 testbus = kmalloc(testbus_len, GFP_KERNEL);
2619 if (!testbus)
2620 return;
2621
2622 host->testbus.select_major = TSTBUS_UNIPRO;
2623 for (i = 0; i < nminor; i++) {
2624 host->testbus.select_minor = i;
2625 ufs_qcom_testbus_config(host);
2626 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2627 }
2628 print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
2629 16, 4, testbus, testbus_len, false);
2630 kfree(testbus);
2631}
2632
Subhash Jadavani86fed9d2018-02-21 10:21:15 -08002633static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba)
2634{
2635 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2636 u32 *testbus = NULL;
2637 int i, nminor = 32, testbus_len = nminor * sizeof(u32);
2638
2639 testbus = kmalloc(testbus_len, GFP_KERNEL);
2640 if (!testbus)
2641 return;
2642
2643 host->testbus.select_major = TSTBUS_UTP_HCI;
Can Guob7f80d52018-03-19 22:46:17 -07002644 for (i = 0; i < nminor; i++) {
Subhash Jadavani86fed9d2018-02-21 10:21:15 -08002645 host->testbus.select_minor = i;
2646 ufs_qcom_testbus_config(host);
2647 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2648 }
2649 print_hex_dump(KERN_ERR, "UTP_HCI_TEST_BUS ", DUMP_PREFIX_OFFSET,
2650 16, 4, testbus, testbus_len, false);
2651 kfree(testbus);
2652}
2653
Subhash Jadavani9c807702017-04-01 00:35:51 -07002654static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
2655{
2656 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2657 struct phy *phy = host->generic_phy;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002658
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002659 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
2660 "HCI Vendor Specific Registers ");
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002661 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002662
2663 if (no_sleep)
2664 return;
2665
2666 /* sleep a bit intermittently as we are dumping too much data */
2667 usleep_range(1000, 1100);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002668 ufs_qcom_testbus_read(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002669 usleep_range(1000, 1100);
2670 ufs_qcom_print_unipro_testbus(hba);
2671 usleep_range(1000, 1100);
Subhash Jadavani86fed9d2018-02-21 10:21:15 -08002672 ufs_qcom_print_utp_hci_testbus(hba);
2673 usleep_range(1000, 1100);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002674 ufs_qcom_phy_dbg_register_dump(phy);
2675 usleep_range(1000, 1100);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002676 ufs_qcom_ice_print_regs(host);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002677}
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002678
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002679/**
2680 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
2681 *
2682 * The variant operations configure the necessary controller and PHY
2683 * handshake during initialization.
2684 */
Yaniv Gardi47555a52015-10-28 13:15:49 +02002685static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002686 .init = ufs_qcom_init,
2687 .exit = ufs_qcom_exit,
Yaniv Gardiae977582015-05-17 18:55:06 +03002688 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002689 .clk_scale_notify = ufs_qcom_clk_scale_notify,
2690 .setup_clocks = ufs_qcom_setup_clocks,
2691 .hce_enable_notify = ufs_qcom_hce_enable_notify,
2692 .link_startup_notify = ufs_qcom_link_startup_notify,
2693 .pwr_change_notify = ufs_qcom_pwr_change_notify,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002694 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002695 .suspend = ufs_qcom_suspend,
2696 .resume = ufs_qcom_resume,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002697 .full_reset = ufs_qcom_full_reset,
2698 .update_sec_cfg = ufs_qcom_update_sec_cfg,
2699 .get_scale_down_gear = ufs_qcom_get_scale_down_gear,
Subhash Jadavani9c807702017-04-01 00:35:51 -07002700 .set_bus_vote = ufs_qcom_set_bus_vote,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002701 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002702#ifdef CONFIG_DEBUG_FS
2703 .add_debugfs = ufs_qcom_dbg_add_debugfs,
2704#endif
2705};
2706
2707static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
2708 .crypto_req_setup = ufs_qcom_crypto_req_setup,
2709 .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
2710 .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
2711 .crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
2712 .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
2713};
2714
2715static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
2716 .req_start = ufs_qcom_pm_qos_req_start,
2717 .req_end = ufs_qcom_pm_qos_req_end,
2718};
2719
2720static struct ufs_hba_variant ufs_hba_qcom_variant = {
2721 .name = "qcom",
2722 .vops = &ufs_hba_qcom_vops,
2723 .crypto_vops = &ufs_hba_crypto_variant_ops,
2724 .pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002725};
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002726
Yaniv Gardi47555a52015-10-28 13:15:49 +02002727/**
2728 * ufs_qcom_probe - probe routine of the driver
2729 * @pdev: pointer to Platform device handle
2730 *
2731 * Return zero for success and non-zero for failure
2732 */
2733static int ufs_qcom_probe(struct platform_device *pdev)
2734{
2735 int err;
2736 struct device *dev = &pdev->dev;
Asutosh Das134636a2017-06-07 11:47:42 +05302737 struct device_node *np = dev->of_node;
2738
2739 /*
 2740	 * On qcom platforms, the boot device is the primary storage
 2741	 * device and can be either eMMC or UFS.
 2742	 * The type of the connected device is detected at runtime.
 2743	 * So, if an eMMC device is connected and this function is
 2744	 * invoked, it would turn off the regulators once it detects
 2745	 * that the storage device is not UFS.
 2746	 * These regulators are turned on by the bootloaders, and turning
 2747	 * them off without sending PON may damage the connected device.
 2748	 * Hence, check for the connected device early on and don't turn
 2749	 * off the regulators.
2750 */
2751 if (of_property_read_bool(np, "non-removable") &&
Veerabhadrarao Badiganti1abe1e02018-05-09 16:58:55 +05302752 !of_property_read_bool(np, "force-ufshc-probe") &&
Asutosh Das134636a2017-06-07 11:47:42 +05302753 strcmp(android_boot_dev, dev_name(dev)))
2754 return -ENODEV;
Yaniv Gardi47555a52015-10-28 13:15:49 +02002755
2756 /* Perform generic probe */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002757 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002758 if (err)
2759 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
2760
2761 return err;
2762}
2763
2764/**
2765 * ufs_qcom_remove - set driver_data of the device to NULL
2766 * @pdev: pointer to platform device handle
2767 *
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002768 * Always return 0
Yaniv Gardi47555a52015-10-28 13:15:49 +02002769 */
2770static int ufs_qcom_remove(struct platform_device *pdev)
2771{
2772 struct ufs_hba *hba = platform_get_drvdata(pdev);
2773
2774 pm_runtime_get_sync(&(pdev)->dev);
2775 ufshcd_remove(hba);
2776 return 0;
2777}
2778
2779static const struct of_device_id ufs_qcom_of_match[] = {
2780 { .compatible = "qcom,ufshc"},
2781 {},
2782};
Javier Martinez Canillasea565332017-01-02 11:04:58 -03002783MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002784
2785static const struct dev_pm_ops ufs_qcom_pm_ops = {
2786 .suspend = ufshcd_pltfrm_suspend,
2787 .resume = ufshcd_pltfrm_resume,
2788 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
2789 .runtime_resume = ufshcd_pltfrm_runtime_resume,
2790 .runtime_idle = ufshcd_pltfrm_runtime_idle,
2791};
2792
2793static struct platform_driver ufs_qcom_pltform = {
2794 .probe = ufs_qcom_probe,
2795 .remove = ufs_qcom_remove,
2796 .shutdown = ufshcd_pltfrm_shutdown,
2797 .driver = {
2798 .name = "ufshcd-qcom",
2799 .pm = &ufs_qcom_pm_ops,
2800 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2801 },
2802};
2803module_platform_driver(ufs_qcom_pltform);
2804
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002805MODULE_LICENSE("GPL v2");