/*
 * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>

#ifdef CONFIG_QCOM_BUS_SCALING
#include <linux/msm-bus.h>
#endif

#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
#include <linux/clk/qcom.h>

#define MAX_PROP_SIZE		32
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
		char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, hba->mmio_base + offset, len * 4, false);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
		char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->tx_l1_sync_clk)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->rx_l1_sync_clk)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		/* The tx lane1 clk could be muxed, hence keep this optional */
		if (host->tx_l1_sync_clk)
			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				host->tx_l1_sync_clk);
	}
	host->is_lane_clks_enabled = true;
	goto out;

disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err) {
			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
					__func__, err);
			goto out;
		}

		/* The tx lane1 clk could be muxed, hence keep this optional */
		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
					&host->tx_l1_sync_clk);
	}
out:
	return err;
}

static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for long during polling so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules that are not
 * involved in a specific operation. The UTP controller CGCs are disabled by
 * default, and this function enables them (after every UFS link startup) to
 * save some leakage power.
 *
 * UFS host controller v3.0.0 onwards also has an internal clock gating
 * mechanism in Qunipro; enable it as well to save additional power.
 */
static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/* Enable UTP internal clock gating */
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();

	/* Enable Qunipro internal clock gating if supported */
	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
				DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
				PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL);
out:
	return err;
}

static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;

	/*
	 * Configure the core and peripheral memory state that the UFS
	 * clocks should retain when they are turned off.
	 * This configuration is required to retain the ICE crypto
	 * configuration (including keys) when core_clk_ice is turned off,
	 * while powering down the non-ICE RAMs of the host controller.
	 */
	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_ice"))
			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
	}
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_force_mem_config(hba);
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		if (!err && host->ice.pdev) {
			err = ufs_qcom_ice_init(host);
			if (err) {
				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
					__func__, err);
				err = -EINVAL;
			}
		}

		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * Returns zero for success and non-zero in case of a failure
 */
static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer,
		bool is_pre_scale_up)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * The UTP controller, however, uses the SYS1CLK_1US_REG register
	 * for its interrupt aggregation / auto-hibern8 logic.
	 */
	if (ufs_qcom_cap_qunipro(host) &&
	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
	       ufshcd_is_auto_hibern8_supported(hba))))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk")) {
			if (is_pre_scale_up)
				core_clk_rate = clki->max_freq;
			else
				core_clk_rate = clk_get_rate(clki->clk);
		}
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
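	/*
	 * For example, a 150 MHz core clock gives
	 * 150000000 / USEC_PER_SEC = 150 core clock cycles per microsecond,
	 * which is the value programmed into REG_UFS_SYS1CLK_1US below.
	 */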
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* these two register fields must be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
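	/*
	 * Note: the value written above is (core clock cycles per
	 * millisecond) multiplied by 100, i.e. the number of core clock
	 * cycles in roughly 100 ms (assuming REG_UFS_PA_LINK_STARTUP_TIMER
	 * counts core clock cycles). For example, a 150 MHz core clock
	 * gives 150000 * 100 = 15000000.
	 */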
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer)
{
	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
				     update_link_startup_timer, false);
}

static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 unipro_ver;
	int err = 0;

	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
			__func__);
		err = -EINVAL;
		goto out;
	}

	/* make sure RX LineCfg is enabled before link startup */
	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
	if (err)
		goto out;

	if (ufs_qcom_cap_qunipro(host)) {
		/*
		 * set unipro core clock cycles to 150 & clear clock divider
		 */
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
		if (err)
			goto out;
	}

	err = ufs_qcom_enable_hw_clk_gating(hba);
	if (err)
		goto out;

	/*
	 * Some UFS devices (and possibly the host) have issues if LCC is
	 * enabled, so set PA_Local_TX_LCC_Enable to 0 before link startup.
	 * This ensures that both the host and the device TX LCC are
	 * disabled once link startup completes.
	 */
	unipro_ver = ufshcd_get_local_unipro_ver(hba);
	if (unipro_ver != UFS_UNIPRO_VER_1_41)
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
				     0);
	if (err)
		goto out;

	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
			     SAVECONFIGTIME_MODE_MASK,
			     PA_VS_CONFIG_REG1);
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);
		goto out;
	}

	/*
	 * Some UFS devices send incorrect LineCfg data as part of power mode
	 * change sequence which may cause host PHY to go into bad state.
	 * Disabling Rx LineCfg of host PHY should help avoid this.
	 */
	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
			__func__);
		goto out;
	}

	/*
	 * The UFS controller has a *clk_req output to GCC for each of the
	 * clocks entering it. When *clk_req for a specific clock is
	 * de-asserted, the corresponding clock from GCC is stopped. The UFS
	 * controller de-asserts the *clk_req outputs while it is in the Auto
	 * Hibernate state only if the clock request feature is enabled.
	 * Enable the clock request feature:
	 * - Enable HW clock control for UFS clocks in GCC (handled by the
	 *   clock driver as part of clk_prepare_enable).
	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
			      UFS_HW_CLK_CTRL_EN,
			      UFS_AH8_CFG);
	/*
	 * Make sure clock request feature gets enabled for HW clk gating
	 * before further operations.
	 */
	mb();

out:
	return err;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_qcom_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}


static int ufs_qcom_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	int min_uV, uA_load;

	if (!vreg) {
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	reg = vreg->reg;
	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}

static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (vreg->enabled)
		return ret;

	ret = ufs_qcom_config_vreg(dev, vreg, true);
	if (ret)
		goto out;

	ret = regulator_enable(vreg->reg);
	if (ret)
		goto out;

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg->enabled)
		return ret;

	ret = regulator_disable(vreg->reg);
	if (ret)
		goto out;

	ret = ufs_qcom_config_vreg(dev, vreg, false);
	if (ret)
		goto out;

	vreg->enabled = false;
out:
	return ret;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	/*
	 * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
	 * power rail and low noise analog power rail for PLL can be
	 * switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
			ret = ufs_qcom_disable_vreg(hba->dev,
					host->vddp_ref_clk);
		ufs_qcom_ice_suspend(host);

		if (ufs_qcom_is_link_off(hba)) {
			/* Assert PHY soft reset */
			ufs_qcom_assert_reset(hba);
			goto out;
		}
	}
	/* Unvote PM QoS */
	ufs_qcom_pm_qos_suspend(host);

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
				   hba->spm_lvl > UFS_PM_LVL_3))
		ufs_qcom_enable_vreg(hba->dev,
				     host->vddp_ref_clk);

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	err = ufs_qcom_ice_resume(host);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}

static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
	int ret = -ENOTSUPP;

	if (!hba->core_reset) {
		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
			ret);
		goto out;
	}

	ret = reset_control_assert(hba->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(hba->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

out:
	return ret;
}

#ifdef CONFIG_SCSI_UFS_QCOM_ICE
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct request *req;
	int ret;

	if (lrbp->cmd && lrbp->cmd->request)
		req = lrbp->cmd->request;
	else
		return 0;

	/* Use request LBA as the DUN value */
	if (req->bio)
		*dun = (req->bio->bi_iter.bi_sector) >>
		       UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
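	/*
	 * bi_sector is in 512-byte sector units; the shift above converts
	 * it to the data unit size used by ICE (a right shift by 3,
	 * assuming UFS_QCOM_ICE_TR_DATA_UNIT_4_KB is 3, i.e. 4 KB data
	 * units).
	 */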

	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);

	return ret;
}

static
int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	int err = 0;

	if (!host->ice.pdev ||
	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, struct request *req)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_end(host, req);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev)
		goto out;

	err = ufs_qcom_ice_reset(host);
out:
	return err;
}

static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!status)
		return -EINVAL;

	return ufs_qcom_ice_get_status(host, status);
}
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup		NULL
#define ufs_qcom_crytpo_engine_cfg_start	NULL
#define ufs_qcom_crytpo_engine_cfg_end		NULL
#define ufs_qcom_crytpo_engine_reset		NULL
#define ufs_qcom_crypto_engine_get_status	NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if device capabilities and vendor pre-defined preferences are both
	 * HS or both PWM then set the minimum gear to be the chosen working
	 * gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what the gear is, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}
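
/*
 * Illustrative example of the negotiation above (hypothetical values): if
 * the device advertises HS gear 4 in both directions while the vendor
 * capabilities passed in qcom_param (filled in by
 * ufs_qcom_pwr_change_notify()) cap HS at gear 3, both sides support HS,
 * so the agreed power mode is taken from qcom_param and the agreed gear is
 * min(4, 3) = 3.
 */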

#ifdef CONFIG_QCOM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}
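
/*
 * For example, HS rate B, gear 3, two lanes produces the vote name
 * "HS_RB_G3_L2", and an uninitialized power mode produces "MIN"; these
 * names are matched against the "qcom,bus-vector-names" DT property in
 * ufs_qcom_get_bus_vote() above.
 */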

static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = __ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int vote, err;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_set_bus_vote() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		vote = host->bus_vote.min_bw_vote;
	}

	err = __ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
			__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
			__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_QCOM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
static inline void msm_bus_scale_unregister_client(uint32_t cl)
{
}
#endif /* CONFIG_QCOM_BUS_SCALING */

static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) beyond what the UFSHCI specification defines. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1) {
		hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			      | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			      | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);

		if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}

	if (host->disable_lpm)
		hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!host->disable_lpm) {
		hba->caps |= UFSHCD_CAP_CLK_GATING;
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
		hba->caps |= UFSHCD_CAP_CLK_SCALING;
	}
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		if (!host->disable_lpm)
			hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
	if (host->hw_ver.major >= 0x3) {
		host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
		/*
		 * The UFS PHY attached to v3.0.0 controller supports entering
		 * deeper low power state of SVS2. This lets the controller
		 * run at much lower clock frequencies for saving power.
		 * Assuming this and any future revisions of the controller
		 * support this capability. Need to revisit this assumption if
		 * any future platform with this core doesn't support the
		 * capability, as there will be no benefit running at lower
		 * frequencies then.
		 */
		host->caps |= UFS_QCOM_CAP_SVS2;
	}
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @is_gating_context: If true then it means this function is called from
 * aggressive clock gating context and we may only need to gate off important
 * clocks. If false then make sure to gate off all clocks.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 bool is_gating_context)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock for HS mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		err = ufs_qcom_ice_resume(host);
		if (err)
			goto out;
	} else {
		err = ufs_qcom_ice_suspend(host);
		if (err)
			goto out;

		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		/*
		 * If auto hibern8 is supported then the link will already
		 * be in hibern8 state and the ref clock can be gated.
		 */
		if (ufshcd_is_auto_hibern8_supported(hba) ||
		    !ufs_qcom_is_link_active(hba)) {
			/* turn off UFS local PHY ref_clk */
			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		}
	}

out:
	return err;
}
1600
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001601#ifdef CONFIG_SMP /* CONFIG_SMP */
1602static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
1603{
1604 int i;
1605
1606 if (cpu >= 0 && cpu < num_possible_cpus())
1607 for (i = 0; i < host->pm_qos.num_groups; i++)
1608 if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
1609 return i;
1610
1611 return host->pm_qos.default_cpu;
1612}
1613
1614static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
1615{
1616 unsigned long flags;
1617 struct ufs_qcom_host *host;
1618 struct ufs_qcom_pm_qos_cpu_group *group;
1619
1620 if (!hba || !req)
1621 return;
1622
1623 host = ufshcd_get_variant(hba);
1624 if (!host->pm_qos.groups)
1625 return;
1626
1627 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
1628
1629 spin_lock_irqsave(hba->host->host_lock, flags);
1630 if (!host->pm_qos.is_enabled)
1631 goto out;
1632
1633 group->active_reqs++;
1634 if (group->state != PM_QOS_REQ_VOTE &&
1635 group->state != PM_QOS_VOTED) {
1636 group->state = PM_QOS_REQ_VOTE;
1637 queue_work(host->pm_qos.workq, &group->vote_work);
1638 }
1639out:
1640 spin_unlock_irqrestore(hba->host->host_lock, flags);
1641}
1642
1643/* hba->host->host_lock is assumed to be held by caller */
1644static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
1645{
1646 struct ufs_qcom_pm_qos_cpu_group *group;
1647
1648 if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
1649 return;
1650
1651 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
1652
1653 if (--group->active_reqs)
1654 return;
1655 group->state = PM_QOS_REQ_UNVOTE;
1656 queue_work(host->pm_qos.workq, &group->unvote_work);
1657}
1658
1659static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
1660 bool should_lock)
1661{
1662 unsigned long flags = 0;
1663
1664 if (!hba || !req)
1665 return;
1666
1667 if (should_lock)
1668 spin_lock_irqsave(hba->host->host_lock, flags);
1669 __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
1670 if (should_lock)
1671 spin_unlock_irqrestore(hba->host->host_lock, flags);
1672}
1673
1674static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
1675{
1676 struct ufs_qcom_pm_qos_cpu_group *group =
1677 container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
1678 struct ufs_qcom_host *host = group->host;
1679 unsigned long flags;
1680
1681 spin_lock_irqsave(host->hba->host->host_lock, flags);
1682
1683 if (!host->pm_qos.is_enabled || !group->active_reqs) {
1684 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1685 return;
1686 }
1687
1688 group->state = PM_QOS_VOTED;
1689 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1690
1691 pm_qos_update_request(&group->req, group->latency_us);
1692}
1693
1694static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
1695{
1696 struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
1697 struct ufs_qcom_pm_qos_cpu_group, unvote_work);
1698 struct ufs_qcom_host *host = group->host;
1699 unsigned long flags;
1700
1701 /*
1702 * Check if new requests were submitted in the meantime and do not
1703 * unvote if so.
1704 */
1705 spin_lock_irqsave(host->hba->host->host_lock, flags);
1706
1707 if (!host->pm_qos.is_enabled || group->active_reqs) {
1708 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1709 return;
1710 }
1711
1712 group->state = PM_QOS_UNVOTED;
1713 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1714
1715 pm_qos_update_request_timeout(&group->req,
1716 group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
1717}
1718
1719static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
1720 struct device_attribute *attr, char *buf)
1721{
1722 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1723 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1724
1725 return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
1726}
1727
1728static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
1729 struct device_attribute *attr, const char *buf, size_t count)
1730{
1731 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1732 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1733 unsigned long value;
1734 unsigned long flags;
1735 bool enable;
1736 int i;
1737
1738 if (kstrtoul(buf, 0, &value))
1739 return -EINVAL;
1740
1741 enable = !!value;
1742
1743 /*
 1744	 * Take the spinlock and save irqs before changing the enabled
 1745	 * flag, so that PM QoS release handling stays correct.
1746 */
1747 spin_lock_irqsave(hba->host->host_lock, flags);
1748 if (enable == host->pm_qos.is_enabled) {
1749 spin_unlock_irqrestore(hba->host->host_lock, flags);
1750 return count;
1751 }
1752 host->pm_qos.is_enabled = enable;
1753 spin_unlock_irqrestore(hba->host->host_lock, flags);
1754
1755 if (!enable)
1756 for (i = 0; i < host->pm_qos.num_groups; i++) {
1757 cancel_work_sync(&host->pm_qos.groups[i].vote_work);
1758 cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
1759 spin_lock_irqsave(hba->host->host_lock, flags);
1760 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1761 host->pm_qos.groups[i].active_reqs = 0;
1762 spin_unlock_irqrestore(hba->host->host_lock, flags);
1763 pm_qos_update_request(&host->pm_qos.groups[i].req,
1764 PM_QOS_DEFAULT_VALUE);
1765 }
1766
1767 return count;
1768}
1769
1770static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
1771 struct device_attribute *attr, char *buf)
1772{
1773 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1774 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1775 int ret;
1776 int i;
1777 int offset = 0;
1778
1779 for (i = 0; i < host->pm_qos.num_groups; i++) {
 1780		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
1781 "cpu group #%d(mask=0x%lx): %d\n", i,
1782 host->pm_qos.groups[i].mask.bits[0],
1783 host->pm_qos.groups[i].latency_us);
1784 if (ret > 0)
1785 offset += ret;
1786 else
1787 break;
1788 }
1789
1790 return offset;
1791}
1792
1793static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
1794 struct device_attribute *attr, const char *buf, size_t count)
1795{
1796 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1797 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1798 unsigned long value;
1799 unsigned long flags;
1800 char *strbuf;
1801 char *strbuf_copy;
1802 char *token;
1803 int i;
1804 int ret;
1805
1806 /* reserve one byte for null termination */
1807 strbuf = kmalloc(count + 1, GFP_KERNEL);
1808 if (!strbuf)
1809 return -ENOMEM;
1810 strbuf_copy = strbuf;
1811 strlcpy(strbuf, buf, count + 1);
1812
1813 for (i = 0; i < host->pm_qos.num_groups; i++) {
1814 token = strsep(&strbuf, ",");
1815 if (!token)
1816 break;
1817
1818 ret = kstrtoul(token, 0, &value);
1819 if (ret)
1820 break;
1821
1822 spin_lock_irqsave(hba->host->host_lock, flags);
1823 host->pm_qos.groups[i].latency_us = value;
1824 spin_unlock_irqrestore(hba->host->host_lock, flags);
1825 }
1826
1827 kfree(strbuf_copy);
1828 return count;
1829}
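
/*
 * Usage sketch for the two sysfs attributes implemented above,
 * "pm_qos_enable" and "pm_qos_latency_us" (registered from
 * ufs_qcom_pm_qos_init() below). The sysfs path depends on where the
 * variant device ends up in the device hierarchy, and the latency values
 * are illustrative only:
 *
 *	# disable PM QoS voting altogether
 *	echo 0 > /sys/devices/.../pm_qos_enable
 *
 *	# one comma-separated latency (in microseconds) per CPU group
 *	echo 70,200 > /sys/devices/.../pm_qos_latency_us
 */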
1830
1831static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
1832{
1833 struct device_node *node = host->hba->dev->of_node;
1834 struct device_attribute *attr;
1835 int ret = 0;
1836 int num_groups;
1837 int num_values;
1838 char wq_name[sizeof("ufs_pm_qos_00")];
1839 int i;
1840
1841 num_groups = of_property_count_u32_elems(node,
1842 "qcom,pm-qos-cpu-groups");
1843 if (num_groups <= 0)
1844 goto no_pm_qos;
1845
1846 num_values = of_property_count_u32_elems(node,
1847 "qcom,pm-qos-cpu-group-latency-us");
1848 if (num_values <= 0)
1849 goto no_pm_qos;
1850
1851 if (num_values != num_groups || num_groups > num_possible_cpus()) {
1852 dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
1853 __func__, num_groups, num_values, num_possible_cpus());
1854 goto no_pm_qos;
1855 }
1856
1857 host->pm_qos.num_groups = num_groups;
1858 host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
1859 sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
1860 if (!host->pm_qos.groups)
1861 return -ENOMEM;
1862
1863 for (i = 0; i < host->pm_qos.num_groups; i++) {
1864 u32 mask;
1865
1866 ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
1867 i, &mask);
1868 if (ret)
1869 goto free_groups;
1870 host->pm_qos.groups[i].mask.bits[0] = mask;
1871 if (!cpumask_subset(&host->pm_qos.groups[i].mask,
1872 cpu_possible_mask)) {
1873 dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
1874 __func__, mask);
1875 goto free_groups;
1876 }
1877
1878 ret = of_property_read_u32_index(node,
1879 "qcom,pm-qos-cpu-group-latency-us", i,
1880 &host->pm_qos.groups[i].latency_us);
1881 if (ret)
1882 goto free_groups;
1883
1884 host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
1885 host->pm_qos.groups[i].req.cpus_affine =
1886 host->pm_qos.groups[i].mask;
1887 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1888 host->pm_qos.groups[i].active_reqs = 0;
1889 host->pm_qos.groups[i].host = host;
1890
1891 INIT_WORK(&host->pm_qos.groups[i].vote_work,
1892 ufs_qcom_pm_qos_vote_work);
1893 INIT_WORK(&host->pm_qos.groups[i].unvote_work,
1894 ufs_qcom_pm_qos_unvote_work);
1895 }
1896
1897 ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
1898 &host->pm_qos.default_cpu);
1899 if (ret || host->pm_qos.default_cpu > num_possible_cpus())
1900 host->pm_qos.default_cpu = 0;
1901
1902 /*
 1903	 * Use a single-threaded workqueue to ensure work submitted to the queue
 1904	 * is performed in order. Consider the following two possible cases:
1905 *
1906 * 1. A new request arrives and voting work is scheduled for it. Before
1907 * the voting work is performed the request is finished and unvote
1908 * work is also scheduled.
1909 * 2. A request is finished and unvote work is scheduled. Before the
1910 * work is performed a new request arrives and voting work is also
1911 * scheduled.
1912 *
1913 * In both cases a vote work and unvote work wait to be performed.
1914 * If ordering is not guaranteed, then the end state might be the
1915 * opposite of the desired state.
1916 */
1917 snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
1918 host->hba->host->host_no);
1919 host->pm_qos.workq = create_singlethread_workqueue(wq_name);
1920 if (!host->pm_qos.workq) {
1921 dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
1922 __func__);
1923 ret = -ENOMEM;
1924 goto free_groups;
1925 }
1926
1927 /* Initialization was ok, add all PM QoS requests */
1928 for (i = 0; i < host->pm_qos.num_groups; i++)
1929 pm_qos_add_request(&host->pm_qos.groups[i].req,
1930 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
1931
1932 /* PM QoS latency sys-fs attribute */
1933 attr = &host->pm_qos.latency_attr;
1934 attr->show = ufs_qcom_pm_qos_latency_show;
1935 attr->store = ufs_qcom_pm_qos_latency_store;
1936 sysfs_attr_init(&attr->attr);
1937 attr->attr.name = "pm_qos_latency_us";
1938 attr->attr.mode = S_IRUGO | S_IWUSR;
1939 if (device_create_file(host->hba->var->dev, attr))
1940 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
1941
1942 /* PM QoS enable sys-fs attribute */
1943 attr = &host->pm_qos.enable_attr;
1944 attr->show = ufs_qcom_pm_qos_enable_show;
1945 attr->store = ufs_qcom_pm_qos_enable_store;
1946 sysfs_attr_init(&attr->attr);
1947 attr->attr.name = "pm_qos_enable";
1948 attr->attr.mode = S_IRUGO | S_IWUSR;
1949 if (device_create_file(host->hba->var->dev, attr))
1950 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
1951
1952 host->pm_qos.is_enabled = true;
1953
1954 return 0;
1955
1956free_groups:
1957 kfree(host->pm_qos.groups);
1958no_pm_qos:
1959 host->pm_qos.groups = NULL;
1960 return ret ? ret : -ENOTSUPP;
1961}
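
/*
 * Illustrative device tree fragment for the properties parsed by
 * ufs_qcom_pm_qos_init() above. The node label, CPU masks and latencies
 * are example values only, not taken from any particular platform; each
 * cell of "qcom,pm-qos-cpu-groups" is a CPU bitmask and
 * "qcom,pm-qos-cpu-group-latency-us" must carry one latency per group:
 *
 *	&ufshc_mem {
 *		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
 *		qcom,pm-qos-cpu-group-latency-us = <70 70>;
 *		qcom,pm-qos-default-cpu = <0>;
 *	};
 */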
1962
1963static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
1964{
1965 int i;
1966
1967 if (!host->pm_qos.groups)
1968 return;
1969
1970 for (i = 0; i < host->pm_qos.num_groups; i++)
1971 flush_work(&host->pm_qos.groups[i].unvote_work);
1972}
1973
1974static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
1975{
1976 int i;
1977
1978 if (!host->pm_qos.groups)
1979 return;
1980
1981 for (i = 0; i < host->pm_qos.num_groups; i++)
1982 pm_qos_remove_request(&host->pm_qos.groups[i].req);
1983 destroy_workqueue(host->pm_qos.workq);
1984
1985 kfree(host->pm_qos.groups);
1986 host->pm_qos.groups = NULL;
1987}
1988#endif /* CONFIG_SMP */
1989
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001990/*
1991 * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
1992 */
1993static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
1994{
1995 struct device_node *node = host->hba->dev->of_node;
1996
1997 host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
1998 if (host->disable_lpm)
1999 pr_info("%s: will disable all LPM modes\n", __func__);
2000}
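
/*
 * A minimal device tree sketch for the boolean property read above (the
 * node label is assumed for illustration); adding it disables all low
 * power modes:
 *
 *	&ufshc_mem {
 *		qcom,disable-lpm;
 *	};
 */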
2001
Subhash Jadavania889db02016-12-09 10:24:58 -08002002static void ufs_qcom_save_host_ptr(struct ufs_hba *hba)
2003{
2004 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2005 int id;
2006
2007 if (!hba->dev->of_node)
2008 return;
2009
2010 /* Extract platform data */
2011 id = of_alias_get_id(hba->dev->of_node, "ufshc");
2012 if (id <= 0)
2013 dev_err(hba->dev, "Failed to get host index %d\n", id);
2014 else if (id <= MAX_UFS_QCOM_HOSTS)
2015 ufs_qcom_hosts[id - 1] = host;
2016 else
2017 dev_err(hba->dev, "invalid host index %d\n", id);
2018}
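
/*
 * of_alias_get_id() above expects a 1-based "ufshc" alias in the device
 * tree; a sketch of what that could look like (the node label is an
 * assumption):
 *
 *	aliases {
 *		ufshc1 = &ufshc_mem;
 *	};
 */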
2019
Subhash Jadavani9c807702017-04-01 00:35:51 -07002020static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
2021 struct ufs_vreg **out_vreg)
2022{
2023 int ret = 0;
2024 char prop_name[MAX_PROP_SIZE];
2025 struct ufs_vreg *vreg = NULL;
2026 struct device *dev = host->hba->dev;
2027 struct device_node *np = dev->of_node;
2028
2029 if (!np) {
 2030		dev_err(dev, "%s: non-DT initialization\n", __func__);
2031 goto out;
2032 }
2033
2034 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
2035 if (!of_parse_phandle(np, prop_name, 0)) {
2036 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
2037 __func__, prop_name);
2038 ret = -ENODEV;
2039 goto out;
2040 }
2041
2042 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
2043 if (!vreg)
2044 return -ENOMEM;
2045
2046 vreg->name = name;
2047
2048 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
2049 ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
2050 if (ret) {
2051 dev_err(dev, "%s: unable to find %s err %d\n",
2052 __func__, prop_name, ret);
2053 goto out;
2054 }
2055
2056 vreg->reg = devm_regulator_get(dev, vreg->name);
2057 if (IS_ERR(vreg->reg)) {
2058 ret = PTR_ERR(vreg->reg);
2059 dev_err(dev, "%s: %s get failed, err=%d\n",
2060 __func__, vreg->name, ret);
2061 }
2062 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
2063 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
2064
2065out:
2066 if (!ret)
2067 *out_vreg = vreg;
2068 return ret;
2069}
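
/*
 * ufs_qcom_parse_reg_info() is called with the name "qcom,vddp-ref-clk"
 * from ufs_qcom_init() below, so it looks for the two properties sketched
 * here; the regulator phandle and current limit are illustrative
 * assumptions. The 1.2V min/max voltages come from VDDP_REF_CLK_MIN_UV and
 * VDDP_REF_CLK_MAX_UV and are not read from DT:
 *
 *	&ufshc_mem {
 *		qcom,vddp-ref-clk-supply = <&example_ldo>;
 *		qcom,vddp-ref-clk-max-microamp = <100>;
 *	};
 */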
2070
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002071/**
2072 * ufs_qcom_init - bind phy with controller
2073 * @hba: host controller instance
2074 *
2075 * Binds PHY with controller and powers up PHY enabling clocks
2076 * and regulators.
2077 *
2078 * Returns -EPROBE_DEFER if binding fails, returns negative error
2079 * on phy power up failure and returns zero on success.
2080 */
2081static int ufs_qcom_init(struct ufs_hba *hba)
2082{
2083 int err;
2084 struct device *dev = hba->dev;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002085 struct platform_device *pdev = to_platform_device(dev);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002086 struct ufs_qcom_host *host;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002087 struct resource *res;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002088
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002089 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2090 if (!host) {
2091 err = -ENOMEM;
2092 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
2093 goto out;
2094 }
2095
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002096 /* Make a two way bind between the qcom host and the hba */
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002097 host->hba = hba;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002098 spin_lock_init(&host->ice_work_lock);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002099
Subhash Jadavani9c807702017-04-01 00:35:51 -07002100 ufshcd_set_variant(hba, host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002101
2102 err = ufs_qcom_ice_get_dev(host);
2103 if (err == -EPROBE_DEFER) {
2104 /*
 2105		 * The UFS driver might be probed before the ICE driver.
 2106		 * In that case, return -EPROBE_DEFER to delay UFS probing
 2107		 * until the ICE driver is ready.
2108 */
2109 dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
2110 __func__, err);
2111 goto out_host_free;
2112
2113 } else if (err == -ENODEV) {
2114 /*
2115 * ICE device is not enabled in DTS file. No need for further
2116 * initialization of ICE driver.
2117 */
 2118		dev_warn(dev, "%s: ICE device is not enabled\n",
2119 __func__);
2120 } else if (err) {
2121 dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
2122 __func__, err);
2123 goto out_host_free;
2124 }
2125
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002126 host->generic_phy = devm_phy_get(dev, "ufsphy");
2127
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002128 if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
2129 /*
 2130		 * The UFS driver might be probed before the PHY driver.
 2131		 * In that case, return -EPROBE_DEFER.
2132 */
2133 err = -EPROBE_DEFER;
 2134		dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
2135 __func__, err);
2136 goto out_host_free;
2137 } else if (IS_ERR(host->generic_phy)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002138 err = PTR_ERR(host->generic_phy);
2139 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
2140 goto out;
2141 }
2142
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002143 err = ufs_qcom_pm_qos_init(host);
2144 if (err)
2145 dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
2146
2147 /* restore the secure configuration */
2148 ufs_qcom_update_sec_cfg(hba, true);
2149
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002150 err = ufs_qcom_bus_register(host);
2151 if (err)
2152 goto out_host_free;
2153
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002154 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
2155 &host->hw_ver.minor, &host->hw_ver.step);
2156
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002157 /*
 2158	 * For newer controllers, the device reference clock control bit has
 2159	 * moved into the UFS controller's own register address space.
2160 */
2161 if (host->hw_ver.major >= 0x02) {
2162 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
2163 host->dev_ref_clk_en_mask = BIT(26);
2164 } else {
2165 /* "dev_ref_clk_ctrl_mem" is optional resource */
2166 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2167 if (res) {
2168 host->dev_ref_clk_ctrl_mmio =
2169 devm_ioremap_resource(dev, res);
2170 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
2171 dev_warn(dev,
2172 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
2173 __func__,
2174 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
2175 host->dev_ref_clk_ctrl_mmio = NULL;
2176 }
2177 host->dev_ref_clk_en_mask = BIT(5);
2178 }
2179 }
2180
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002181 /* update phy revision information before calling phy_init() */
2182 ufs_qcom_phy_save_controller_version(host->generic_phy,
2183 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
2184
Subhash Jadavani9c807702017-04-01 00:35:51 -07002185 err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
2186 &host->vddp_ref_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002187 phy_init(host->generic_phy);
2188 err = phy_power_on(host->generic_phy);
2189 if (err)
2190 goto out_unregister_bus;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002191 if (host->vddp_ref_clk) {
2192 err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
2193 if (err) {
2194 dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
2195 __func__, err);
2196 goto out_disable_phy;
2197 }
2198 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002199
2200 err = ufs_qcom_init_lane_clks(host);
2201 if (err)
Subhash Jadavani9c807702017-04-01 00:35:51 -07002202 goto out_disable_vddp;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002203
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002204 ufs_qcom_parse_lpm(host);
2205 if (host->disable_lpm)
2206 pm_runtime_forbid(host->hba->dev);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002207 ufs_qcom_set_caps(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002208 ufs_qcom_advertise_quirks(hba);
2209
Subhash Jadavani9c807702017-04-01 00:35:51 -07002210 ufs_qcom_set_bus_vote(hba, true);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002211 ufs_qcom_setup_clocks(hba, true, false);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002212
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002213 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
2214 ufs_qcom_get_default_testbus_cfg(host);
2215 err = ufs_qcom_testbus_config(host);
2216 if (err) {
2217 dev_warn(dev, "%s: failed to configure the testbus %d\n",
2218 __func__, err);
2219 err = 0;
2220 }
2221
Subhash Jadavania889db02016-12-09 10:24:58 -08002222 ufs_qcom_save_host_ptr(hba);
2223
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002224 goto out;
2225
Subhash Jadavani9c807702017-04-01 00:35:51 -07002226out_disable_vddp:
2227 if (host->vddp_ref_clk)
2228 ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002229out_disable_phy:
2230 phy_power_off(host->generic_phy);
2231out_unregister_bus:
2232 phy_exit(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002233 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002234out_host_free:
2235 devm_kfree(dev, host);
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002236 ufshcd_set_variant(hba, NULL);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002237out:
2238 return err;
2239}
2240
2241static void ufs_qcom_exit(struct ufs_hba *hba)
2242{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002243 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002244
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002245 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002246 ufs_qcom_disable_lane_clks(host);
2247 phy_power_off(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002248 ufs_qcom_pm_qos_remove(host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002249}
2250
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002251static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
2252 u32 clk_cycles)
2253{
2254 int err;
2255 u32 core_clk_ctrl_reg;
2256
2257 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
2258 return -EINVAL;
2259
2260 err = ufshcd_dme_get(hba,
2261 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2262 &core_clk_ctrl_reg);
2263 if (err)
2264 goto out;
2265
2266 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
2267 core_clk_ctrl_reg |= clk_cycles;
2268
2269 /* Clear CORE_CLK_DIV_EN */
2270 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2271
2272 err = ufshcd_dme_set(hba,
2273 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2274 core_clk_ctrl_reg);
2275out:
2276 return err;
2277}
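
/*
 * For reference: DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES appears to
 * hold the number of unipro core clock cycles per microsecond, so the
 * values passed by the callers below roughly correspond to the following
 * core clock frequencies (an inference from the attribute name and the
 * callers, not from a hardware datasheet):
 *
 *	150 cycles/us -> ~150 MHz (nominal)
 *	 75 cycles/us -> ~75 MHz  (SVS)
 *	 37 cycles/us -> ~37.5 MHz (SVS2)
 */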
2278
2279static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
2280{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002281 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002282 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2283 int err = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002284
2285 if (!ufs_qcom_cap_qunipro(host))
Subhash Jadavani9c807702017-04-01 00:35:51 -07002286 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002287
Subhash Jadavani9c807702017-04-01 00:35:51 -07002288 if (attr)
2289 __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2290 attr->hs_rate, false, true);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002291
2292 /* set unipro core clock cycles to 150 and clear clock divider */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002293 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
2294out:
2295 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002296}
2297
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002298static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
2299{
2300 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002301 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002302 int err = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002303
2304 if (!ufs_qcom_cap_qunipro(host))
2305 return 0;
2306
Subhash Jadavani9c807702017-04-01 00:35:51 -07002307 if (attr)
2308 ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2309 attr->hs_rate, false);
2310
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002311 if (ufs_qcom_cap_svs2(host))
2312 /*
2313 * For SVS2 set unipro core clock cycles to 37 and
2314 * clear clock divider
2315 */
2316 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
2317 else
2318 /*
2319 * For SVS set unipro core clock cycles to 75 and
2320 * clear clock divider
2321 */
2322 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
2323
2324 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002325}
2326
2327static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
2328 bool scale_up, enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002329{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002330 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002331 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002332
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002333 switch (status) {
2334 case PRE_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002335 if (scale_up)
2336 err = ufs_qcom_clk_scale_up_pre_change(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002337 break;
2338 case POST_CHANGE:
Subhash Jadavani9c807702017-04-01 00:35:51 -07002339 if (!scale_up)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002340 err = ufs_qcom_clk_scale_down_post_change(hba);
2341
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002342 ufs_qcom_update_bus_bw_vote(host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002343 break;
2344 default:
2345 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
2346 err = -EINVAL;
2347 break;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002348 }
2349
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002350 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002351}
2352
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002353/*
2354 * This function should be called to restore the security configuration of UFS
2355 * register space after coming out of UFS host core power collapse.
2356 *
2357 * @hba: host controller instance
2358 * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2359 * and set "false" when secure configuration is lost.
2360 */
2361static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
2362{
2363 return 0;
2364}
2365
2366
2367static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
2368{
2369 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2370
2371 if (ufs_qcom_cap_svs2(host))
2372 return UFS_HS_G1;
 2373	/* Default SVS support @ HS G2 frequencies */
2374 return UFS_HS_G2;
2375}
2376
2377void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
2378 void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
2379 char *str, void *priv))
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002380{
2381 u32 reg;
2382 struct ufs_qcom_host *host;
2383
2384 if (unlikely(!hba)) {
2385 pr_err("%s: hba is NULL\n", __func__);
2386 return;
2387 }
2388 if (unlikely(!print_fn)) {
2389 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
2390 return;
2391 }
2392
2393 host = ufshcd_get_variant(hba);
2394 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
2395 return;
2396
2397 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
2398 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
2399
2400 reg = ufshcd_readl(hba, REG_UFS_CFG1);
2401 reg |= UFS_BIT(17);
2402 ufshcd_writel(hba, reg, REG_UFS_CFG1);
2403
2404 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
2405 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
2406
2407 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
2408 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
2409
2410 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
2411 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
2412
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002413 /* clear bit 17 - UTP_DBG_RAMS_EN */
2414 ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002415
2416 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
2417 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
2418
2419 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
2420 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
2421
2422 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
2423 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
2424
2425 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
2426 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
2427
2428 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
2429 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
2430
2431 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
2432 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
2433
2434 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
2435 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
2436}
2437
2438static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
2439{
Subhash Jadavani9c807702017-04-01 00:35:51 -07002440 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
2441 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
2442 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002443 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002444 } else {
2445 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002446 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002447 }
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002448}
2449
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002450static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
2451{
2452 /* provide a legal default configuration */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002453 host->testbus.select_major = TSTBUS_UNIPRO;
2454 host->testbus.select_minor = 37;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002455}
2456
2457static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
2458{
2459 if (host->testbus.select_major >= TSTBUS_MAX) {
2460 dev_err(host->hba->dev,
 2461			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
2462 __func__, host->testbus.select_major);
2463 return false;
2464 }
2465
2466 /*
 2467	 * We do not check the valid select_minor range for each individual
 2468	 * select_major, since there is no harm in configuring a
 2469	 * non-existent select_minor
2470 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07002471 if (host->testbus.select_minor > 0xFF) {
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002472 dev_err(host->hba->dev,
2473 "%s: 0x%05X is not a legal testbus option\n",
2474 __func__, host->testbus.select_minor);
2475 return false;
2476 }
2477
2478 return true;
2479}
2480
Subhash Jadavani9c807702017-04-01 00:35:51 -07002481/*
2482 * The caller of this function must make sure that the controller
2483 * is out of runtime suspend and appropriate clocks are enabled
 2484 * before accessing the controller registers.
2485 */
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002486int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
2487{
2488 int reg;
2489 int offset;
2490 u32 mask = TEST_BUS_SUB_SEL_MASK;
2491
2492 if (!host)
2493 return -EINVAL;
2494
2495 if (!ufs_qcom_testbus_cfg_is_ok(host))
2496 return -EPERM;
2497
2498 switch (host->testbus.select_major) {
2499 case TSTBUS_UAWM:
2500 reg = UFS_TEST_BUS_CTRL_0;
2501 offset = 24;
2502 break;
2503 case TSTBUS_UARM:
2504 reg = UFS_TEST_BUS_CTRL_0;
2505 offset = 16;
2506 break;
2507 case TSTBUS_TXUC:
2508 reg = UFS_TEST_BUS_CTRL_0;
2509 offset = 8;
2510 break;
2511 case TSTBUS_RXUC:
2512 reg = UFS_TEST_BUS_CTRL_0;
2513 offset = 0;
2514 break;
2515 case TSTBUS_DFC:
2516 reg = UFS_TEST_BUS_CTRL_1;
2517 offset = 24;
2518 break;
2519 case TSTBUS_TRLUT:
2520 reg = UFS_TEST_BUS_CTRL_1;
2521 offset = 16;
2522 break;
2523 case TSTBUS_TMRLUT:
2524 reg = UFS_TEST_BUS_CTRL_1;
2525 offset = 8;
2526 break;
2527 case TSTBUS_OCSC:
2528 reg = UFS_TEST_BUS_CTRL_1;
2529 offset = 0;
2530 break;
2531 case TSTBUS_WRAPPER:
2532 reg = UFS_TEST_BUS_CTRL_2;
2533 offset = 16;
2534 break;
2535 case TSTBUS_COMBINED:
2536 reg = UFS_TEST_BUS_CTRL_2;
2537 offset = 8;
2538 break;
2539 case TSTBUS_UTP_HCI:
2540 reg = UFS_TEST_BUS_CTRL_2;
2541 offset = 0;
2542 break;
2543 case TSTBUS_UNIPRO:
2544 reg = UFS_UNIPRO_CFG;
Subhash Jadavani9c807702017-04-01 00:35:51 -07002545 offset = 20;
2546 mask = 0xFFF;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002547 break;
2548 /*
2549 * No need for a default case, since
2550 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
2551 * is legal
2552 */
2553 }
2554 mask <<= offset;
2555
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002556 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
2557 (u32)host->testbus.select_major << 19,
2558 REG_UFS_CFG1);
2559 ufshcd_rmwl(host->hba, mask,
2560 (u32)host->testbus.select_minor << offset,
2561 reg);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002562 ufs_qcom_enable_test_bus(host);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002563 /*
2564 * Make sure the test bus configuration is
2565 * committed before returning.
2566 */
2567 mb();
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002568
2569 return 0;
2570}
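
/*
 * A minimal usage sketch for the test bus selection above, mirroring what
 * ufs_qcom_print_unipro_testbus() does further down: pick a major/minor
 * selector, apply it, then read UFS_TEST_BUS. As noted above, the caller
 * must keep the controller out of runtime suspend with clocks enabled:
 *
 *	host->testbus.select_major = TSTBUS_UNIPRO;
 *	host->testbus.select_minor = 37;
 *	if (!ufs_qcom_testbus_config(host))
 *		val = ufshcd_readl(host->hba, UFS_TEST_BUS);
 */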
2571
2572static void ufs_qcom_testbus_read(struct ufs_hba *hba)
2573{
2574 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
2575}
2576
Subhash Jadavani9c807702017-04-01 00:35:51 -07002577static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002578{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002579 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002580 u32 *testbus = NULL;
2581 int i, nminor = 256, testbus_len = nminor * sizeof(u32);
2582
2583 testbus = kmalloc(testbus_len, GFP_KERNEL);
2584 if (!testbus)
2585 return;
2586
2587 host->testbus.select_major = TSTBUS_UNIPRO;
2588 for (i = 0; i < nminor; i++) {
2589 host->testbus.select_minor = i;
2590 ufs_qcom_testbus_config(host);
2591 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2592 }
2593 print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
2594 16, 4, testbus, testbus_len, false);
2595 kfree(testbus);
2596}
2597
2598static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
2599{
2600 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2601 struct phy *phy = host->generic_phy;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002602
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002603 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
2604 "HCI Vendor Specific Registers ");
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002605 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002606
2607 if (no_sleep)
2608 return;
2609
2610 /* sleep a bit intermittently as we are dumping too much data */
2611 usleep_range(1000, 1100);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002612 ufs_qcom_testbus_read(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07002613 usleep_range(1000, 1100);
2614 ufs_qcom_print_unipro_testbus(hba);
2615 usleep_range(1000, 1100);
2616 ufs_qcom_phy_dbg_register_dump(phy);
2617 usleep_range(1000, 1100);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002618 ufs_qcom_ice_print_regs(host);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002619}
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002620
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002621/**
2622 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
2623 *
2624 * The variant operations configure the necessary controller and PHY
2625 * handshake during initialization.
2626 */
Yaniv Gardi47555a52015-10-28 13:15:49 +02002627static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002628 .init = ufs_qcom_init,
2629 .exit = ufs_qcom_exit,
Yaniv Gardiae977582015-05-17 18:55:06 +03002630 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002631 .clk_scale_notify = ufs_qcom_clk_scale_notify,
2632 .setup_clocks = ufs_qcom_setup_clocks,
2633 .hce_enable_notify = ufs_qcom_hce_enable_notify,
2634 .link_startup_notify = ufs_qcom_link_startup_notify,
2635 .pwr_change_notify = ufs_qcom_pwr_change_notify,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002636 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002637 .suspend = ufs_qcom_suspend,
2638 .resume = ufs_qcom_resume,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002639 .full_reset = ufs_qcom_full_reset,
2640 .update_sec_cfg = ufs_qcom_update_sec_cfg,
2641 .get_scale_down_gear = ufs_qcom_get_scale_down_gear,
Subhash Jadavani9c807702017-04-01 00:35:51 -07002642 .set_bus_vote = ufs_qcom_set_bus_vote,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002643 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002644#ifdef CONFIG_DEBUG_FS
2645 .add_debugfs = ufs_qcom_dbg_add_debugfs,
2646#endif
2647};
2648
2649static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
2650 .crypto_req_setup = ufs_qcom_crypto_req_setup,
2651 .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
2652 .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
2653 .crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
2654 .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
2655};
2656
2657static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
2658 .req_start = ufs_qcom_pm_qos_req_start,
2659 .req_end = ufs_qcom_pm_qos_req_end,
2660};
2661
2662static struct ufs_hba_variant ufs_hba_qcom_variant = {
2663 .name = "qcom",
2664 .vops = &ufs_hba_qcom_vops,
2665 .crypto_vops = &ufs_hba_crypto_variant_ops,
2666 .pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002667};
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002668
Yaniv Gardi47555a52015-10-28 13:15:49 +02002669/**
2670 * ufs_qcom_probe - probe routine of the driver
2671 * @pdev: pointer to Platform device handle
2672 *
2673 * Return zero for success and non-zero for failure
2674 */
2675static int ufs_qcom_probe(struct platform_device *pdev)
2676{
2677 int err;
2678 struct device *dev = &pdev->dev;
2679
2680 /* Perform generic probe */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002681 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002682 if (err)
2683 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
2684
2685 return err;
2686}
2687
2688/**
2689 * ufs_qcom_remove - set driver_data of the device to NULL
2690 * @pdev: pointer to platform device handle
2691 *
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002692 * Always returns 0
Yaniv Gardi47555a52015-10-28 13:15:49 +02002693 */
2694static int ufs_qcom_remove(struct platform_device *pdev)
2695{
2696 struct ufs_hba *hba = platform_get_drvdata(pdev);
2697
2698 pm_runtime_get_sync(&(pdev)->dev);
2699 ufshcd_remove(hba);
2700 return 0;
2701}
2702
2703static const struct of_device_id ufs_qcom_of_match[] = {
2704 { .compatible = "qcom,ufshc"},
2705 {},
2706};
2707
2708static const struct dev_pm_ops ufs_qcom_pm_ops = {
2709 .suspend = ufshcd_pltfrm_suspend,
2710 .resume = ufshcd_pltfrm_resume,
2711 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
2712 .runtime_resume = ufshcd_pltfrm_runtime_resume,
2713 .runtime_idle = ufshcd_pltfrm_runtime_idle,
2714};
2715
2716static struct platform_driver ufs_qcom_pltform = {
2717 .probe = ufs_qcom_probe,
2718 .remove = ufs_qcom_remove,
2719 .shutdown = ufshcd_pltfrm_shutdown,
2720 .driver = {
2721 .name = "ufshcd-qcom",
2722 .pm = &ufs_qcom_pm_ops,
2723 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2724 },
2725};
2726module_platform_driver(ufs_qcom_pltform);
2727
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002728MODULE_LICENSE("GPL v2");