/*
 * Copyright (c) 2013-2019, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>
#include <linux/clk/qcom.h>

#ifdef CONFIG_QCOM_BUS_SCALING
#include <linux/msm-bus.h>
#endif

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
#include "ufs_quirks.h"

#define MAX_PROP_SIZE			32
#define VDDP_REF_CLK_MIN_UV		1200000
#define VDDP_REF_CLK_MAX_UV		1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_1us_cycles,
						       u32 clk_40ns_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
		char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, (void *)hba->mmio_base + offset, len * 4, false);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
		char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->tx_l1_sync_clk)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->rx_l1_sync_clk)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		/* The tx lane1 clk could be muxed, hence keep this optional */
		if (host->tx_l1_sync_clk)
			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
						 host->tx_l1_sync_clk);
	}

	host->is_lane_clks_enabled = true;
	goto out;

disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d\n",
				__func__, err);
		goto out;
	}

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d\n",
				__func__, err);
		goto out;
	}

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err) {
			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d\n",
					__func__, err);
			goto out;
		}

		/* The tx lane1 clk could be muxed, hence keep this optional */
		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
				      &host->tx_l1_sync_clk);
	}
out:
	return err;
}

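/*
 * ufs_qcom_check_hibern8 - poll the MPHY TX FSM state until it reports
 * HIBERN8 or the polling window expires, and return an error if the PHY
 * did not settle in the hibern8 state.
 */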
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time while polling,
	 * so check the state once more.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

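/*
 * ufs_qcom_power_up_sequence - bring the UFS PHY out of reset: calibrate it
 * for the configured HS rate, start its serdes, wait for the PCS to become
 * ready and then select the UniPro core to be used.
 */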
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * After reset de-assertion, the PHY needs its ref clocks, voltage and
	 * current to settle down before the serdes can be started.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev,
			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 *
 * UFS host controller v3.0.0 onwards has an internal clock gating mechanism
 * in Qunipro; enable it as well to save additional power.
 */
static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/* Enable UTP internal clock gating */
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();

	/* Enable Qunipro internal clock gating if supported */
	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
			     DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
			     PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
	if (err)
		goto out;

	if (!((host->hw_ver.major == 4) && (host->hw_ver.minor == 0) &&
	      (host->hw_ver.step == 0))) {
		err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				     DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				     DME_VS_CORE_CLK_CTRL);
	} else {
		dev_err(hba->dev, "%s: skipping DME_HW_CGC_EN set\n",
			__func__);
	}
out:
	return err;
}

static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;

	/*
	 * Configure the behavior of ufs clocks core and peripheral
	 * memory state when they are turned off.
	 * This configuration is required to allow retaining
	 * ICE crypto configuration (including keys) when
	 * core_clk_ice is turned off, and powering down
	 * non-ICE RAMs of host controller.
	 */
	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_ice"))
			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
	}
}

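/*
 * ufs_qcom_hce_enable_notify - host controller enable callback. Before the
 * controller is enabled, power up the PHY, enable the lane clocks and
 * initialize ICE if present; afterwards verify that the PHY moved into
 * hibern8.
 */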
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_force_mem_config(hba);
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		if (!err && host->ice.pdev) {
			err = ufs_qcom_ice_init(host);
			if (err) {
				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
					__func__, err);
				err = -EINVAL;
			}
		}

		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * __ufs_qcom_cfg_timers - configure the UTP timer registers according to the
 * current gear, power mode and rate.
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer,
		bool is_pre_scale_up)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation / Auto hibern8 logic.
	 * It is mandatory to write SYS1CLK_1US_REG register on UFS host
	 * controller V4.0.0 onwards.
	 */
	if (ufs_qcom_cap_qunipro(host) &&
	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
	       ufshcd_is_auto_hibern8_supported(hba) ||
	       host->hw_ver.major >= 4)))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk")) {
			if (is_pre_scale_up)
				core_clk_rate = clki->max_freq;
			else
				core_clk_rate = clk_get_rate(clki->clk);
		}
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* these two register fields must be written together */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer)
{
	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
				     update_link_startup_timer, false);
}

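/*
 * ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode - program the DME
 * vendor-specific core clock control attributes according to the maximum
 * core_clk_unipro frequency found in the clock list.
 */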
static int ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	u32 max_freq = 0;
	int err = 0;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) &&
		    (!strcmp(clki->name, "core_clk_unipro"))) {
			max_freq = clki->max_freq;
			break;
		}
	}

	switch (max_freq) {
	case 300000000:
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 300, 12);
		break;
	case 150000000:
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150, 6);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

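/*
 * ufs_qcom_link_startup_pre_change - prepare the controller and PHY for link
 * startup: program the cfg timers for PWM-G1, enable RX LineCfg, set the DME
 * core clock control for the maximum UniPro frequency, enable HW clock
 * gating and disable TX LCC where required.
 */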
static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 unipro_ver;
	int err = 0;

	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
			__func__);
		err = -EINVAL;
		goto out;
	}

	/* make sure RX LineCfg is enabled before link startup */
	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
	if (err)
		goto out;

	if (ufs_qcom_cap_qunipro(host)) {
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode(hba);
		if (err)
			goto out;
	}

	err = ufs_qcom_enable_hw_clk_gating(hba);
	if (err)
		goto out;

	/*
	 * Some UFS devices (and possibly the host) have issues if LCC is
	 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
	 * before link startup which will make sure that both host
	 * and device TX LCC are disabled once link startup is
	 * completed.
	 */
	unipro_ver = ufshcd_get_local_unipro_ver(hba);
	if (unipro_ver != UFS_UNIPRO_VER_1_41)
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
				     0);
	if (err)
		goto out;

	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
			     SAVECONFIGTIME_MODE_MASK,
			     PA_VS_CONFIG_REG1);
out:
	return err;
}

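/*
 * ufs_qcom_link_startup_post_change - after link startup, enable the TX
 * lanes that are actually connected, work around bad LineCfg data from some
 * devices, and enable the hardware clock request feature when auto hibern8
 * is supported.
 */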
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);
		goto out;
	}

	/*
	 * Some UFS devices send incorrect LineCfg data as part of power mode
	 * change sequence which may cause host PHY to go into bad state.
	 * Disabling Rx LineCfg of host PHY should help avoid this.
	 */
	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
			__func__);
		goto out;
	}

	/*
	 * The UFS controller has a *clk_req output to GCC for each of the
	 * clocks entering it. When *clk_req for a specific clock is
	 * de-asserted, the corresponding clock from GCC is stopped. The UFS
	 * controller de-asserts the *clk_req outputs when it is in Auto
	 * Hibernate state only if the Clock request feature is enabled.
	 * Enable the Clock request feature:
	 * - Enable HW clock control for UFS clocks in GCC (handled by the
	 *   clock driver as part of clk_prepare_enable).
	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
			      UFS_HW_CLK_CTRL_EN,
			      UFS_AH8_CFG);
	/*
	 * Make sure clock request feature gets enabled for HW clk gating
	 * before further operations.
	 */
	mb();

out:
	return err;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_qcom_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}

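/*
 * ufs_qcom_config_vreg - set the load and voltage of a UFS regulator for
 * either the active (on) or the powered-down (off) state.
 */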
static int ufs_qcom_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	int min_uV, uA_load;

	if (!vreg) {
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	reg = vreg->reg;
	if (regulator_count_voltages(reg) > 0) {
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;

		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
	}
out:
	return ret;
}

static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (vreg->enabled)
		return ret;

	ret = ufs_qcom_config_vreg(dev, vreg, true);
	if (ret)
		goto out;

	ret = regulator_enable(vreg->reg);
	if (ret)
		goto out;

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg->enabled)
		return ret;

	ret = regulator_disable(vreg->reg);
	if (ret)
		goto out;

	ret = ufs_qcom_config_vreg(dev, vreg, false);
	if (ret)
		goto out;

	vreg->enabled = false;
out:
	return ret;
}

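/*
 * ufs_qcom_suspend - when the UniPro link is no longer active, turn off the
 * lane clocks, the PHY and (for a fully powered-off link) the vddp ref clock
 * supply, and suspend ICE; the PM QoS vote is dropped unless the link is
 * completely off.
 */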
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	/*
	 * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
	 * power rail and low noise analog power rail for PLL can be
	 * switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		if (host->is_phy_pwr_on) {
			phy_power_off(phy);
			host->is_phy_pwr_on = false;
		}
		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
			ret = ufs_qcom_disable_vreg(hba->dev,
					host->vddp_ref_clk);
		ufs_qcom_ice_suspend(host);
		if (ufs_qcom_is_link_off(hba)) {
			/* Assert PHY soft reset */
			ufs_qcom_assert_reset(hba);
			goto out;
		}
	}
	/* Unvote PM QoS */
	ufs_qcom_pm_qos_suspend(host);

out:
	return ret;
}

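/*
 * ufs_qcom_resume - power the PHY back on, re-enable the vddp ref clock
 * supply when required by the runtime/system PM level, enable the lane
 * clocks and resume ICE.
 */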
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	if (!host->is_phy_pwr_on) {
		err = phy_power_on(phy);
		if (err) {
			dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
				__func__, err);
			goto out;
		}
		host->is_phy_pwr_on = true;
	}
	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
				   hba->spm_lvl > UFS_PM_LVL_3))
		ufs_qcom_enable_vreg(hba->dev,
				     host->vddp_ref_clk);

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	err = ufs_qcom_ice_resume(host);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}

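/*
 * ufs_qcom_full_reset - assert and de-assert the host controller core reset
 * through the reset controller, with the delay the hardware requires between
 * the two operations.
 */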
static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
	int ret = -ENOTSUPP;

	if (!hba->core_reset) {
		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
			ret);
		goto out;
	}

	ret = reset_control_assert(hba->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(hba->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

out:
	return ret;
}

#ifdef CONFIG_SCSI_UFS_QCOM_ICE
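/*
 * ufs_qcom_crypto_req_setup - derive the data unit number (DUN) for an
 * inline-crypto request from the request's bio, then let the ICE driver fill
 * in the crypto configuration index and whether encryption should be enabled
 * for this command.
 */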
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct request *req;
	int ret;

	if (lrbp->cmd && lrbp->cmd->request)
		req = lrbp->cmd->request;
	else
		return 0;

	/* Use request LBA or given dun as the DUN value */
	if (req->bio) {
#ifdef CONFIG_PFK
		if (bio_dun(req->bio)) {
			/* dun @bio can be split, so we have to adjust offset */
			*dun = bio_dun(req->bio);
		} else {
			*dun = req->bio->bi_iter.bi_sector;
			*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
		}
#else
		*dun = req->bio->bi_iter.bi_sector;
		*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
#endif
	}
	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);

	return ret;
}

static
int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	int err = 0;

	if (!host->ice.pdev ||
	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, struct request *req)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_end(host, req);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev)
		goto out;

	err = ufs_qcom_ice_reset(host);
out:
	return err;
}

static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!status)
		return -EINVAL;

	return ufs_qcom_ice_get_status(host, status);
}
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup		NULL
#define ufs_qcom_crytpo_engine_cfg_start	NULL
#define ufs_qcom_crytpo_engine_cfg_end		NULL
#define ufs_qcom_crytpo_engine_reset		NULL
#define ufs_qcom_crypto_engine_get_status	NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

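/*
 * ufs_qcom_get_pwr_dev_param - negotiate the power mode (HS/PWM), gear and
 * number of lanes between the device's maximum capabilities and the vendor
 * limits, and fill the agreed parameters into @agreed_pwr.
 */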
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what the gear is, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

#ifdef CONFIG_QCOM_BUS_SCALING
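/*
 * ufs_qcom_get_bus_vote - map a speed mode string produced by
 * ufs_qcom_get_speed_mode() (or "MIN"/"MAX") to its index in the
 * "qcom,bus-vector-names" device tree property so it can be used as a bus
 * scaling vote.
 */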
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = __ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int vote, err;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_set_bus_vote() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		vote = host->bus_vote.min_bw_vote;
	}

	err = __ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = 0644;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_QCOM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
static inline void msm_bus_scale_unregister_client(uint32_t cl)
{
}
#endif /* CONFIG_QCOM_BUS_SCALING */

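/*
 * ufs_qcom_dev_ref_clk_ctrl - gate or ungate the device reference clock,
 * honouring the delays the device needs around hibern8 entry/exit before
 * the clock state is changed.
 */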
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8, in which case we need to make
		 * sure that the device ref_clk stays active for a given time
		 * after hibern8 entry.
		 */
		if (!enable)
			udelay(host->hba->dev_ref_clk_gating_wait);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for a given time before the hibern8
		 * exit command.
		 */
		if (enable) {
			if (host->hba->dev_info.quirks &
			    UFS_DEVICE_QUIRK_WAIT_AFTER_REF_CLK_UNGATE)
				usleep_range(50, 60);
			else
				udelay(1);
		}

		host->is_dev_ref_clk_enabled = enable;
	}
}

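/*
 * ufs_qcom_pwr_change_notify - power mode change callback. On PRE_CHANGE,
 * negotiate the power mode against the vendor limits (capping legacy
 * controllers at HS-G2) and enable the device ref clock before moving to HS;
 * on POST_CHANGE, reprogram the cfg timers, update the TX lane enable mask,
 * cache the agreed parameters and update the bus bandwidth vote.
 */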
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
		    ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
		    !ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			    (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * QCOM UFS host controllers might have some non-standard behaviours (quirks)
 * beyond what is specified by the UFSHCI specification. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}

	if (host->disable_lpm)
		hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
}

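/*
 * ufs_qcom_set_caps - advertise the host driver and QCOM-specific
 * capabilities (clock gating/scaling, QUniPro, SVS2, ...) supported by this
 * controller revision, unless low power modes are disabled.
 */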
1556static void ufs_qcom_set_caps(struct ufs_hba *hba)
1557{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001558 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001559
Can Guo2d8e79c2018-06-25 01:05:49 -07001560 if (!host->disable_lpm) {
1561 hba->caps |= UFSHCD_CAP_CLK_GATING;
1562 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1563 hba->caps |= UFSHCD_CAP_CLK_SCALING;
1564 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001565 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001566
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001567 if (host->hw_ver.major >= 0x2) {
Can Guo2d8e79c2018-06-25 01:05:49 -07001568 if (!host->disable_lpm)
1569 hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001570 host->caps = UFS_QCOM_CAP_QUNIPRO |
1571 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001572 }
Can Guo2d8e79c2018-06-25 01:05:49 -07001573 if (host->hw_ver.major >= 0x3) {
1574 host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
1575 /*
1576 * The UFS PHY attached to v3.0.0 controller supports entering
1577 * deeper low power state of SVS2. This lets the controller
1578 * run at much lower clock frequencies for saving power.
1579 * Assuming this and any future revisions of the controller
1580 * support this capability. Need to revist this assumption if
1581 * any future platform with this core doesn't support the
1582 * capability, as there will be no benefit running at lower
1583 * frequencies then.
1584 */
1585 host->caps |= UFS_QCOM_CAP_SVS2;
1586 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001587}
1588
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001589/**
1590 * ufs_qcom_setup_clocks - enables/disable clocks
1591 * @hba: host controller instance
1592 * @on: If true, enable clocks else disable them.
Subhash Jadavani1e879e82016-10-06 21:48:22 -07001593 * @status: PRE_CHANGE or POST_CHANGE notify
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001594 *
1595 * Returns 0 on success, non-zero on failure.
1596 */
Subhash Jadavani1e879e82016-10-06 21:48:22 -07001597static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1598 enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001599{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001600 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Can Guo2d8e79c2018-06-25 01:05:49 -07001601 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001602
1603 /*
1604 * In case ufs_qcom_init() is not yet done, simply ignore.
1605 * This ufs_qcom_setup_clocks() shall be called from
1606 * ufs_qcom_init() after init is done.
1607 */
1608 if (!host)
1609 return 0;
1610
Subhash Jadavani1e879e82016-10-06 21:48:22 -07001611 if (on && (status == POST_CHANGE)) {
Can Guo2d8e79c2018-06-25 01:05:49 -07001612 if (!host->is_phy_pwr_on) {
1613 phy_power_on(host->generic_phy);
1614 host->is_phy_pwr_on = true;
1615 }
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001616		/* enable the device ref clock for HS mode */
1617 if (ufshcd_is_hs_mode(&hba->pwr_info))
1618 ufs_qcom_dev_ref_clk_ctrl(host, true);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001619
Can Guo2d8e79c2018-06-25 01:05:49 -07001620 err = ufs_qcom_ice_resume(host);
1621 if (err)
1622 goto out;
Subhash Jadavani1e879e82016-10-06 21:48:22 -07001623 } else if (!on && (status == PRE_CHANGE)) {
Can Guo2d8e79c2018-06-25 01:05:49 -07001624 err = ufs_qcom_ice_suspend(host);
1625 if (err)
1626 goto out;
1627
1628 /*
1629 * If auto hibern8 is supported then the link will already
1630 * be in hibern8 state and the ref clock can be gated.
1631 */
Can Guoe0c627d2018-03-05 20:15:10 +08001632 if ((ufshcd_is_auto_hibern8_supported(hba) &&
1633 hba->hibern8_on_idle.is_enabled) ||
Can Guo2d8e79c2018-06-25 01:05:49 -07001634 !ufs_qcom_is_link_active(hba)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001635 /* disable device ref_clk */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001636 ufs_qcom_dev_ref_clk_ctrl(host, false);
1637
Vivek Gautamfeb3d792016-11-08 15:37:48 +05301638 /* powering off PHY during aggressive clk gating */
Can Guo2d8e79c2018-06-25 01:05:49 -07001639 if (host->is_phy_pwr_on) {
1640 phy_power_off(host->generic_phy);
1641 host->is_phy_pwr_on = false;
1642 }
Vivek Gautamfeb3d792016-11-08 15:37:48 +05301643 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001644 }
1645
Can Guo2d8e79c2018-06-25 01:05:49 -07001646out:
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001647 return err;
1648}
1649
Can Guo2d8e79c2018-06-25 01:05:49 -07001650#ifdef CONFIG_SMP /* CONFIG_SMP */
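/*
 * Per-request PM QoS voting: a sketch of the mechanism implemented below,
 * derived only from the code in this file.
 *
 * Each CPU group parsed from DT carries its own pm_qos_request and latency
 * target. When a request starts on a CPU belonging to a group, the group
 * moves PM_QOS_UNVOTED -> PM_QOS_REQ_VOTE and vote_work applies the group's
 * latency_us; when the group's last request ends, it moves to
 * PM_QOS_REQ_UNVOTE and unvote_work lets the vote expire after
 * UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US via pm_qos_update_request_timeout(), so
 * back-to-back I/O does not thrash the vote. State transitions are
 * serialized by the SCSI host lock and a single-threaded workqueue.
 */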
1651static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
1652{
1653 int i;
1654
1655 if (cpu >= 0 && cpu < num_possible_cpus())
1656 for (i = 0; i < host->pm_qos.num_groups; i++)
1657 if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
1658 return i;
1659
1660 return host->pm_qos.default_cpu;
1661}
1662
1663static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
1664{
1665 unsigned long flags;
1666 struct ufs_qcom_host *host;
1667 struct ufs_qcom_pm_qos_cpu_group *group;
1668
1669 if (!hba || !req)
1670 return;
1671
1672 host = ufshcd_get_variant(hba);
1673 if (!host->pm_qos.groups)
1674 return;
1675
1676 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
1677
1678 spin_lock_irqsave(hba->host->host_lock, flags);
1679 if (!host->pm_qos.is_enabled)
1680 goto out;
1681
1682 group->active_reqs++;
1683 if (group->state != PM_QOS_REQ_VOTE &&
1684 group->state != PM_QOS_VOTED) {
1685 group->state = PM_QOS_REQ_VOTE;
1686 queue_work(host->pm_qos.workq, &group->vote_work);
1687 }
1688out:
1689 spin_unlock_irqrestore(hba->host->host_lock, flags);
1690}
1691
1692/* hba->host->host_lock is assumed to be held by caller */
1693static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
1694{
1695 struct ufs_qcom_pm_qos_cpu_group *group;
1696
1697 if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
1698 return;
1699
1700 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
1701
1702 if (--group->active_reqs)
1703 return;
1704 group->state = PM_QOS_REQ_UNVOTE;
1705 queue_work(host->pm_qos.workq, &group->unvote_work);
1706}
1707
1708static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
1709 bool should_lock)
1710{
1711 unsigned long flags = 0;
1712
1713 if (!hba || !req)
1714 return;
1715
1716 if (should_lock)
1717 spin_lock_irqsave(hba->host->host_lock, flags);
1718 __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
1719 if (should_lock)
1720 spin_unlock_irqrestore(hba->host->host_lock, flags);
1721}
1722
1723static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
1724{
1725 struct ufs_qcom_pm_qos_cpu_group *group =
1726 container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
1727 struct ufs_qcom_host *host = group->host;
1728 unsigned long flags;
1729
1730 spin_lock_irqsave(host->hba->host->host_lock, flags);
1731
1732 if (!host->pm_qos.is_enabled || !group->active_reqs) {
1733 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1734 return;
1735 }
1736
1737 group->state = PM_QOS_VOTED;
1738 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1739
1740 pm_qos_update_request(&group->req, group->latency_us);
1741}
1742
1743static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
1744{
1745 struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
1746 struct ufs_qcom_pm_qos_cpu_group, unvote_work);
1747 struct ufs_qcom_host *host = group->host;
1748 unsigned long flags;
1749
1750 /*
1751 * Check if new requests were submitted in the meantime and do not
1752 * unvote if so.
1753 */
1754 spin_lock_irqsave(host->hba->host->host_lock, flags);
1755
1756 if (!host->pm_qos.is_enabled || group->active_reqs) {
1757 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1758 return;
1759 }
1760
1761 group->state = PM_QOS_UNVOTED;
1762 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1763
1764 pm_qos_update_request_timeout(&group->req,
1765 group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
1766}
1767
1768static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
1769 struct device_attribute *attr, char *buf)
1770{
1771 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1772 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1773
1774 return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
1775}
1776
1777static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
1778 struct device_attribute *attr, const char *buf, size_t count)
1779{
1780 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1781 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1782 unsigned long value;
1783 unsigned long flags;
1784 bool enable;
1785 int i;
1786
1787 if (kstrtoul(buf, 0, &value))
1788 return -EINVAL;
1789
1790 enable = !!value;
1791
1792 /*
1793 * Must take the spinlock and save irqs before changing the enabled
1794 * flag in order to keep correctness of PM QoS release.
1795 */
1796 spin_lock_irqsave(hba->host->host_lock, flags);
1797 if (enable == host->pm_qos.is_enabled) {
1798 spin_unlock_irqrestore(hba->host->host_lock, flags);
1799 return count;
1800 }
1801 host->pm_qos.is_enabled = enable;
1802 spin_unlock_irqrestore(hba->host->host_lock, flags);
1803
1804 if (!enable)
1805 for (i = 0; i < host->pm_qos.num_groups; i++) {
1806 cancel_work_sync(&host->pm_qos.groups[i].vote_work);
1807 cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
1808 spin_lock_irqsave(hba->host->host_lock, flags);
1809 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1810 host->pm_qos.groups[i].active_reqs = 0;
1811 spin_unlock_irqrestore(hba->host->host_lock, flags);
1812 pm_qos_update_request(&host->pm_qos.groups[i].req,
1813 PM_QOS_DEFAULT_VALUE);
1814 }
1815
1816 return count;
1817}
1818
1819static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
1820 struct device_attribute *attr, char *buf)
1821{
1822 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1823 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1824 int ret;
1825 int i;
1826 int offset = 0;
1827
1828 for (i = 0; i < host->pm_qos.num_groups; i++) {
1829		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
1830 "cpu group #%d(mask=0x%lx): %d\n", i,
1831 host->pm_qos.groups[i].mask.bits[0],
1832 host->pm_qos.groups[i].latency_us);
1833 if (ret > 0)
1834 offset += ret;
1835 else
1836 break;
1837 }
1838
1839 return offset;
1840}
1841
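/*
 * pm_qos_latency_us store format (illustrative, inferred from the parsing
 * below): one comma-separated latency value per CPU group, in microseconds.
 * For example, on a two-group configuration,
 * "echo 70,140 > pm_qos_latency_us" would set group 0 to 70us and group 1
 * to 140us (values hypothetical). Omitted trailing values leave the
 * remaining groups unchanged.
 */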
1842static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
1843 struct device_attribute *attr, const char *buf, size_t count)
1844{
1845 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1846 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1847 unsigned long value;
1848 unsigned long flags;
1849 char *strbuf;
1850 char *strbuf_copy;
1851 char *token;
1852 int i;
1853 int ret;
1854
1855 /* reserve one byte for null termination */
1856 strbuf = kmalloc(count + 1, GFP_KERNEL);
1857 if (!strbuf)
1858 return -ENOMEM;
1859 strbuf_copy = strbuf;
1860 strlcpy(strbuf, buf, count + 1);
1861
1862 for (i = 0; i < host->pm_qos.num_groups; i++) {
1863 token = strsep(&strbuf, ",");
1864 if (!token)
1865 break;
1866
1867 ret = kstrtoul(token, 0, &value);
1868 if (ret)
1869 break;
1870
1871 spin_lock_irqsave(hba->host->host_lock, flags);
1872 host->pm_qos.groups[i].latency_us = value;
1873 spin_unlock_irqrestore(hba->host->host_lock, flags);
1874 }
1875
1876 kfree(strbuf_copy);
1877 return count;
1878}
1879
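/*
 * ufs_qcom_pm_qos_init - set up PM QoS CPU groups from DT.
 *
 * Illustrative DT fragment matching the properties parsed below (property
 * names come from this function; the mask and latency values are
 * hypothetical, not taken from any real board file):
 *
 *	qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
 *	qcom,pm-qos-cpu-group-latency-us = <70 70>;
 *	qcom,pm-qos-default-cpu = <0>;
 *
 * Both arrays must have the same number of entries (one cpumask and one
 * latency per group), and the group count may not exceed the number of
 * possible CPUs.
 */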
1880static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
1881{
1882 struct device_node *node = host->hba->dev->of_node;
1883 struct device_attribute *attr;
1884 int ret = 0;
1885 int num_groups;
1886 int num_values;
1887 char wq_name[sizeof("ufs_pm_qos_00")];
1888 int i;
1889
1890 num_groups = of_property_count_u32_elems(node,
1891 "qcom,pm-qos-cpu-groups");
1892 if (num_groups <= 0)
1893 goto no_pm_qos;
1894
1895 num_values = of_property_count_u32_elems(node,
1896 "qcom,pm-qos-cpu-group-latency-us");
1897 if (num_values <= 0)
1898 goto no_pm_qos;
1899
1900 if (num_values != num_groups || num_groups > num_possible_cpus()) {
1901 dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
1902 __func__, num_groups, num_values, num_possible_cpus());
1903 goto no_pm_qos;
1904 }
1905
1906 host->pm_qos.num_groups = num_groups;
1907 host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
1908 sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
1909 if (!host->pm_qos.groups)
1910 return -ENOMEM;
1911
1912 for (i = 0; i < host->pm_qos.num_groups; i++) {
1913 u32 mask;
1914
1915 ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
1916 i, &mask);
1917 if (ret)
1918 goto free_groups;
1919 host->pm_qos.groups[i].mask.bits[0] = mask;
1920 if (!cpumask_subset(&host->pm_qos.groups[i].mask,
1921 cpu_possible_mask)) {
1922 dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
1923 __func__, mask);
1924 goto free_groups;
1925 }
1926
1927 ret = of_property_read_u32_index(node,
1928 "qcom,pm-qos-cpu-group-latency-us", i,
1929 &host->pm_qos.groups[i].latency_us);
1930 if (ret)
1931 goto free_groups;
1932
1933 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1934 host->pm_qos.groups[i].active_reqs = 0;
1935 host->pm_qos.groups[i].host = host;
1936
1937 INIT_WORK(&host->pm_qos.groups[i].vote_work,
1938 ufs_qcom_pm_qos_vote_work);
1939 INIT_WORK(&host->pm_qos.groups[i].unvote_work,
1940 ufs_qcom_pm_qos_unvote_work);
1941 }
1942
1943 ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
1944 &host->pm_qos.default_cpu);
1945 if (ret || host->pm_qos.default_cpu > num_possible_cpus())
1946 host->pm_qos.default_cpu = 0;
1947
1948 /*
1949	 * Use a single-threaded workqueue to ensure work submitted to the queue
1950 * is performed in order. Consider the following 2 possible cases:
1951 *
1952 * 1. A new request arrives and voting work is scheduled for it. Before
1953 * the voting work is performed the request is finished and unvote
1954 * work is also scheduled.
1955 * 2. A request is finished and unvote work is scheduled. Before the
1956 * work is performed a new request arrives and voting work is also
1957 * scheduled.
1958 *
1959 * In both cases a vote work and unvote work wait to be performed.
1960 * If ordering is not guaranteed, then the end state might be the
1961 * opposite of the desired state.
1962 */
1963 snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
1964 host->hba->host->host_no);
1965 host->pm_qos.workq = create_singlethread_workqueue(wq_name);
1966 if (!host->pm_qos.workq) {
1967 dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
1968 __func__);
1969 ret = -ENOMEM;
1970 goto free_groups;
1971 }
1972
1973 /* Initialization was ok, add all PM QoS requests */
1974 for (i = 0; i < host->pm_qos.num_groups; i++)
1975 pm_qos_add_request(&host->pm_qos.groups[i].req,
1976 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
1977
1978 /* PM QoS latency sys-fs attribute */
1979 attr = &host->pm_qos.latency_attr;
1980 attr->show = ufs_qcom_pm_qos_latency_show;
1981 attr->store = ufs_qcom_pm_qos_latency_store;
1982 sysfs_attr_init(&attr->attr);
1983 attr->attr.name = "pm_qos_latency_us";
1984 attr->attr.mode = 0644;
1985 if (device_create_file(host->hba->var->dev, attr))
1986 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
1987
1988 /* PM QoS enable sys-fs attribute */
1989 attr = &host->pm_qos.enable_attr;
1990 attr->show = ufs_qcom_pm_qos_enable_show;
1991 attr->store = ufs_qcom_pm_qos_enable_store;
1992 sysfs_attr_init(&attr->attr);
1993 attr->attr.name = "pm_qos_enable";
1994 attr->attr.mode = 0644;
1995 if (device_create_file(host->hba->var->dev, attr))
1996 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
1997
1998 host->pm_qos.is_enabled = true;
1999
2000 return 0;
2001
2002free_groups:
2003 kfree(host->pm_qos.groups);
2004no_pm_qos:
2005 host->pm_qos.groups = NULL;
2006 return ret ? ret : -ENOTSUPP;
2007}
2008
2009static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
2010{
2011 int i;
2012
2013 if (!host->pm_qos.groups)
2014 return;
2015
2016 for (i = 0; i < host->pm_qos.num_groups; i++)
2017 flush_work(&host->pm_qos.groups[i].unvote_work);
2018}
2019
2020static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
2021{
2022 int i;
2023
2024 if (!host->pm_qos.groups)
2025 return;
2026
2027 for (i = 0; i < host->pm_qos.num_groups; i++)
2028 pm_qos_remove_request(&host->pm_qos.groups[i].req);
2029 destroy_workqueue(host->pm_qos.workq);
2030
2031 kfree(host->pm_qos.groups);
2032 host->pm_qos.groups = NULL;
2033}
2034#endif /* CONFIG_SMP */
2035
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002036#define ANDROID_BOOT_DEV_MAX 30
2037static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002038
2039#ifndef MODULE
2040static int __init get_android_boot_dev(char *str)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002041{
2042 strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
2043 return 1;
2044}
2045__setup("androidboot.bootdevice=", get_android_boot_dev);
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002046#endif
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002047
Can Guo2d8e79c2018-06-25 01:05:49 -07002048/*
2049 * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
2050 */
2051static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
2052{
2053 struct device_node *node = host->hba->dev->of_node;
2054
2055 host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
2056 if (host->disable_lpm)
2057 pr_info("%s: will disable all LPM modes\n", __func__);
2058}
2059
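/*
 * ufs_qcom_parse_reg_info - parse an optional regulator from DT.
 *
 * For a supply named "qcom,vddp-ref-clk" (the only user in this file), the
 * properties looked up below would be, with hypothetical example values:
 *
 *	qcom,vddp-ref-clk-supply = <&example_regulator>;
 *	qcom,vddp-ref-clk-max-microamp = <100>;
 *	qcom,vddp-ref-clk-min-uV = <1200000>;
 *	qcom,vddp-ref-clk-max-uV = <1200000>;
 *
 * The -min-uV/-max-uV properties are optional; the 1.2V defaults above are
 * used when they are absent.
 */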
2060static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
2061 struct ufs_vreg **out_vreg)
2062{
2063 int ret = 0;
2064 char prop_name[MAX_PROP_SIZE];
2065 struct ufs_vreg *vreg = NULL;
2066 struct device *dev = host->hba->dev;
2067 struct device_node *np = dev->of_node;
2068
2069 if (!np) {
2070 dev_err(dev, "%s: non DT initialization\n", __func__);
2071 goto out;
2072 }
2073
2074 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
2075 if (!of_parse_phandle(np, prop_name, 0)) {
2076 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
2077 __func__, prop_name);
2078 ret = -ENODEV;
2079 goto out;
2080 }
2081
2082 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
2083 if (!vreg)
2084 return -ENOMEM;
2085
2086 vreg->name = name;
2087
2088 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
2089 ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
2090 if (ret) {
2091 dev_err(dev, "%s: unable to find %s err %d\n",
2092 __func__, prop_name, ret);
2093 goto out;
2094 }
2095
2096 vreg->reg = devm_regulator_get(dev, vreg->name);
2097 if (IS_ERR(vreg->reg)) {
2098 ret = PTR_ERR(vreg->reg);
2099 dev_err(dev, "%s: %s get failed, err=%d\n",
2100 __func__, vreg->name, ret);
2101 }
2102
2103 snprintf(prop_name, MAX_PROP_SIZE, "%s-min-uV", name);
2104 ret = of_property_read_u32(np, prop_name, &vreg->min_uV);
2105 if (ret) {
2106 dev_dbg(dev, "%s: unable to find %s err %d, using default\n",
2107 __func__, prop_name, ret);
2108 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
2109 ret = 0;
2110 }
2111
2112 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-uV", name);
2113 ret = of_property_read_u32(np, prop_name, &vreg->max_uV);
2114 if (ret) {
2115 dev_dbg(dev, "%s: unable to find %s err %d, using default\n",
2116 __func__, prop_name, ret);
2117 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
2118 ret = 0;
2119 }
2120
2121out:
2122 if (!ret)
2123 *out_vreg = vreg;
2124 return ret;
2125}
2126
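/*
 * ufs_qcom_save_host_ptr - record this host in ufs_qcom_hosts[] using the
 * "ufshc" DT alias (e.g. an alias id of 1 maps to index 0), so the host can
 * later be looked up by instance number.
 */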
2127static void ufs_qcom_save_host_ptr(struct ufs_hba *hba)
2128{
2129 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2130 int id;
2131
2132 if (!hba->dev->of_node)
2133 return;
2134
2135 /* Extract platform data */
2136 id = of_alias_get_id(hba->dev->of_node, "ufshc");
2137 if (id <= 0)
2138 dev_err(hba->dev, "Failed to get host index %d\n", id);
2139 else if (id <= MAX_UFS_QCOM_HOSTS)
2140 ufs_qcom_hosts[id - 1] = host;
2141 else
2142 dev_err(hba->dev, "invalid host index %d\n", id);
2143}
2144
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002145/**
2146 * ufs_qcom_init - bind phy with controller
2147 * @hba: host controller instance
2148 *
2149 * Binds the PHY with the controller and powers up the PHY, enabling
2150 * its clocks and regulators.
2151 *
2152 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
2153 * power up failure, and zero on success.
2154 */
2155static int ufs_qcom_init(struct ufs_hba *hba)
2156{
2157 int err;
2158 struct device *dev = hba->dev;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002159 struct platform_device *pdev = to_platform_device(dev);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002160 struct ufs_qcom_host *host;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002161 struct resource *res;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002162
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002163 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2164 if (!host) {
2165 err = -ENOMEM;
2166 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
2167 goto out;
2168 }
2169
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002170 /* Make a two way bind between the qcom host and the hba */
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002171 host->hba = hba;
Can Guo2d8e79c2018-06-25 01:05:49 -07002172 spin_lock_init(&host->ice_work_lock);
2173
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002174 ufshcd_set_variant(hba, host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002175
Can Guo2d8e79c2018-06-25 01:05:49 -07002176 err = ufs_qcom_ice_get_dev(host);
2177 if (err == -EPROBE_DEFER) {
2178 /*
2179		 * The UFS driver might be probed before the ICE driver is.
2180 * In that case we would like to return EPROBE_DEFER code
2181 * in order to delay its probing.
2182 */
2183 dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
2184 __func__, err);
2185 goto out_variant_clear;
2186
2187 } else if (err == -ENODEV) {
2188 /*
2189 * ICE device is not enabled in DTS file. No need for further
2190 * initialization of ICE driver.
2191 */
2192 dev_warn(dev, "%s: ICE device is not enabled\n",
2193 __func__);
2194 } else if (err) {
2195 dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
2196 __func__, err);
2197 goto out_variant_clear;
Zhen Kongee7bdc62019-03-14 10:55:19 -07002198 } else {
2199 hba->host->inlinecrypt_support = 1;
Can Guo2d8e79c2018-06-25 01:05:49 -07002200 }
2201
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002202 host->generic_phy = devm_phy_get(dev, "ufsphy");
2203
Yaniv Gardiab436702016-12-05 19:25:15 -08002204 if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
2205 /*
2206		 * The UFS driver might be probed before the phy driver is.
2207 * In that case we would like to return EPROBE_DEFER code.
2208 */
2209 err = -EPROBE_DEFER;
2210		dev_warn(dev, "%s: required phy hasn't been probed yet, err = %d\n",
2211 __func__, err);
2212 goto out_variant_clear;
2213 } else if (IS_ERR(host->generic_phy)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002214 err = PTR_ERR(host->generic_phy);
2215 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
Bjorn Anderssona6854df2016-11-19 22:34:51 -08002216 goto out_variant_clear;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002217 }
2218
Can Guo2d8e79c2018-06-25 01:05:49 -07002219 err = ufs_qcom_pm_qos_init(host);
2220 if (err)
2221 dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
2222
2223 /* restore the secure configuration */
2224 ufs_qcom_update_sec_cfg(hba, true);
2225
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002226 err = ufs_qcom_bus_register(host);
2227 if (err)
Bjorn Anderssona6854df2016-11-19 22:34:51 -08002228 goto out_variant_clear;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002229
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002230 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
2231 &host->hw_ver.minor, &host->hw_ver.step);
2232
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002233 /*
2234	 * For newer controllers, the device reference clock control bit has
2235	 * moved into the UFS controller's own register address space.
2236 */
2237 if (host->hw_ver.major >= 0x02) {
2238 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
2239 host->dev_ref_clk_en_mask = BIT(26);
2240 } else {
2241 /* "dev_ref_clk_ctrl_mem" is optional resource */
2242 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2243 if (res) {
2244 host->dev_ref_clk_ctrl_mmio =
2245 devm_ioremap_resource(dev, res);
2246 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
2247 dev_warn(dev,
2248 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
2249 __func__,
2250 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
2251 host->dev_ref_clk_ctrl_mmio = NULL;
2252 }
2253 host->dev_ref_clk_en_mask = BIT(5);
2254 }
2255 }
2256
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03002257 /* update phy revision information before calling phy_init() */
2258 ufs_qcom_phy_save_controller_version(host->generic_phy,
2259 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
2260
Can Guo2d8e79c2018-06-25 01:05:49 -07002261 err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
2262 &host->vddp_ref_clk);
2263
2264 phy_init(host->generic_phy);
2265
2266 if (host->vddp_ref_clk) {
2267 err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
2268 if (err) {
2269 dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
2270 __func__, err);
2271 goto out_unregister_bus;
2272 }
2273 }
2274
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002275 err = ufs_qcom_init_lane_clks(host);
2276 if (err)
Can Guo2d8e79c2018-06-25 01:05:49 -07002277 goto out_disable_vddp;
2278
2279 ufs_qcom_parse_lpm(host);
2280 if (host->disable_lpm)
2281 pm_runtime_forbid(host->hba->dev);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002282
Yaniv Gardicad2e032015-03-31 17:37:14 +03002283 ufs_qcom_set_caps(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002284 ufs_qcom_advertise_quirks(hba);
2285
Can Guo2d8e79c2018-06-25 01:05:49 -07002286 ufs_qcom_set_bus_vote(hba, true);
Subhash Jadavani1e879e82016-10-06 21:48:22 -07002287 ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002288
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002289 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
2290 ufs_qcom_get_default_testbus_cfg(host);
2291 err = ufs_qcom_testbus_config(host);
2292 if (err) {
2293 dev_warn(dev, "%s: failed to configure the testbus %d\n",
2294 __func__, err);
2295 err = 0;
2296 }
2297
Can Guo2d8e79c2018-06-25 01:05:49 -07002298 ufs_qcom_save_host_ptr(hba);
2299
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002300 goto out;
2301
Can Guo2d8e79c2018-06-25 01:05:49 -07002302out_disable_vddp:
2303 if (host->vddp_ref_clk)
2304 ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
2305out_unregister_bus:
2306 phy_exit(host->generic_phy);
2307 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Bjorn Anderssona6854df2016-11-19 22:34:51 -08002308out_variant_clear:
Can Guo2d8e79c2018-06-25 01:05:49 -07002309 devm_kfree(dev, host);
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002310 ufshcd_set_variant(hba, NULL);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002311out:
2312 return err;
2313}
2314
2315static void ufs_qcom_exit(struct ufs_hba *hba)
2316{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002317 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002318
Can Guo2d8e79c2018-06-25 01:05:49 -07002319 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002320 ufs_qcom_disable_lane_clks(host);
Can Guo2d8e79c2018-06-25 01:05:49 -07002321 if (host->is_phy_pwr_on) {
2322 phy_power_off(host->generic_phy);
2323 host->is_phy_pwr_on = false;
2324 }
Vivek Gautamd7fe6b62016-11-08 15:37:50 +05302325 phy_exit(host->generic_phy);
Can Guo2d8e79c2018-06-25 01:05:49 -07002326 ufs_qcom_pm_qos_remove(host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002327}
2328
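/*
 * Program DME_VS_CORE_CLK_CTRL so the core knows how many UniPro core clock
 * cycles make up 1us (and, on host controller v4.0.0 onwards, how many make
 * up 40ns via PA_VS_CORE_CLK_40NS_CYCLES), while clearing CORE_CLK_DIV_EN.
 * Both cycle counts must be derived by the callers from the currently
 * programmed core_clk_unipro frequency.
 */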
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002329static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
Can Guo2d8e79c2018-06-25 01:05:49 -07002330 u32 clk_1us_cycles,
2331 u32 clk_40ns_cycles)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002332{
Can Guo2d8e79c2018-06-25 01:05:49 -07002333 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002334 int err;
Can Guo2d8e79c2018-06-25 01:05:49 -07002335 u32 core_clk_ctrl_reg, clk_cycles;
2336 u32 mask = DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
2337 u32 offset = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002338
Can Guo2d8e79c2018-06-25 01:05:49 -07002339	/* Bit mask and offset changed on UFS host controller V4.0.0 onwards */
2340 if (host->hw_ver.major >= 4) {
2341 mask = DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK_V4;
2342 offset = DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_OFFSET_V4;
2343 }
2344
2345 if (clk_1us_cycles > mask)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002346 return -EINVAL;
2347
2348 err = ufshcd_dme_get(hba,
2349 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2350 &core_clk_ctrl_reg);
2351 if (err)
2352 goto out;
2353
Can Guo2d8e79c2018-06-25 01:05:49 -07002354	/* program the 1us-cycles field at its (version dependent) offset */
2355	core_clk_ctrl_reg &= ~(mask << offset);
2356	core_clk_ctrl_reg |= (u32)clk_1us_cycles << offset;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002357
2358 /* Clear CORE_CLK_DIV_EN */
2359 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2360
2361 err = ufshcd_dme_set(hba,
2362 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2363 core_clk_ctrl_reg);
Can Guo2d8e79c2018-06-25 01:05:49 -07002364
2365	/*
2366	 * UFS host controller V4.0.0 onwards needs to program
2367	 * PA_VS_CORE_CLK_40NS_CYCLES per the programmed UniPro core clock frequency.
2368	 */
2369 if (!err && (host->hw_ver.major >= 4)) {
2370 if (clk_40ns_cycles > PA_VS_CORE_CLK_40NS_CYCLES_MASK)
2371 return -EINVAL;
2372
2373 err = ufshcd_dme_get(hba,
2374 UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES),
2375 &clk_cycles);
2376 if (err)
2377 goto out;
2378
2379 clk_cycles &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
2380 clk_cycles |= clk_40ns_cycles;
2381
2382 err = ufshcd_dme_set(hba,
2383 UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES),
2384 clk_cycles);
2385 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002386out:
2387 return err;
2388}
2389
2390static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
2391{
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002392 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Can Guo2d8e79c2018-06-25 01:05:49 -07002393 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2394 int err = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002395
2396 if (!ufs_qcom_cap_qunipro(host))
Can Guo2d8e79c2018-06-25 01:05:49 -07002397 goto out;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002398
Can Guo2d8e79c2018-06-25 01:05:49 -07002399 if (attr)
2400 __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2401 attr->hs_rate, false, true);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002402
Can Guo2d8e79c2018-06-25 01:05:49 -07002403 err = ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode(hba);
2404out:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002405 return err;
2406}
2407
2408static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
2409{
2410 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Can Guo2d8e79c2018-06-25 01:05:49 -07002411 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2412 int err = 0;
2413 struct ufs_clk_info *clki;
2414 struct list_head *head = &hba->clk_list_head;
2415 u32 curr_freq = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002416
2417 if (!ufs_qcom_cap_qunipro(host))
2418 return 0;
2419
Can Guo2d8e79c2018-06-25 01:05:49 -07002420 if (attr)
2421 ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2422 attr->hs_rate, false);
2423
2424 list_for_each_entry(clki, head, list) {
2425 if (!IS_ERR_OR_NULL(clki->clk) &&
2426 (!strcmp(clki->name, "core_clk_unipro"))) {
2427 curr_freq = clk_get_rate(clki->clk);
2428 break;
2429 }
2430 }
2431
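	/*
	 * The cycle counts below follow from the scaled-down core_clk_unipro
	 * rate: at 37.5 MHz, 1us spans 37.5 cycles (rounded up to 38) and
	 * 40ns spans 1.5 cycles (rounded up to 2); at 75 MHz the same
	 * intervals are exactly 75 and 3 cycles.
	 */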
2432 switch (curr_freq) {
2433 case 37500000:
2434 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 38, 2);
2435 break;
2436 case 75000000:
2437 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75, 3);
2438 break;
2439 default:
2440 err = -EINVAL;
2441 break;
2442 }
2443
2444 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002445}
2446
2447static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
2448 bool scale_up, enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002449{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002450 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002451 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002452
Can Guo2d8e79c2018-06-25 01:05:49 -07002453 switch (status) {
2454 case PRE_CHANGE:
2455
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002456 if (scale_up)
2457 err = ufs_qcom_clk_scale_up_pre_change(hba);
Can Guo2d8e79c2018-06-25 01:05:49 -07002458 break;
2459 case POST_CHANGE:
2460 if (!scale_up)
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002461 err = ufs_qcom_clk_scale_down_post_change(hba);
2462
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002463 ufs_qcom_update_bus_bw_vote(host);
Can Guo2d8e79c2018-06-25 01:05:49 -07002464 break;
2465 default:
2466 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
2467 err = -EINVAL;
2468 break;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002469 }
2470
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002471 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002472}
2473
Can Guo2d8e79c2018-06-25 01:05:49 -07002474/*
2475 * This function should be called to restore the security configuration of UFS
2476 * register space after coming out of UFS host core power collapse.
2477 *
2478 * @hba: host controller instance
2479 * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2480 * and set "false" when secure configuration is lost.
2481 */
2482static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
2483{
2484 return 0;
2485}
2486
2487static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
2488{
2489 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2490
2491 if (ufs_qcom_cap_svs2(host))
2492 return UFS_HS_G1;
2493	/* Default SVS support @ HS G2 frequencies */
2494 return UFS_HS_G2;
2495}
2496
2497void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
2498 void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
2499 char *str, void *priv))
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002500{
2501 u32 reg;
2502 struct ufs_qcom_host *host;
2503
2504 if (unlikely(!hba)) {
2505 pr_err("%s: hba is NULL\n", __func__);
2506 return;
2507 }
2508 if (unlikely(!print_fn)) {
2509 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
2510 return;
2511 }
2512
2513 host = ufshcd_get_variant(hba);
2514 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
2515 return;
2516
2517 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
2518 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
2519
2520 reg = ufshcd_readl(hba, REG_UFS_CFG1);
Alim Akhtar7e014ef2017-10-03 20:51:23 +05302521 reg |= UTP_DBG_RAMS_EN;
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002522 ufshcd_writel(hba, reg, REG_UFS_CFG1);
2523
2524 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
2525 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
2526
2527 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
2528 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
2529
2530 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
2531 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
2532
Subhash Jadavanib84ca6e2016-12-05 19:25:42 -08002533 /* clear bit 17 - UTP_DBG_RAMS_EN */
Alim Akhtar7e014ef2017-10-03 20:51:23 +05302534 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002535
2536 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
2537 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
2538
2539 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
2540 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
2541
2542 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
2543 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
2544
2545 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
2546 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
2547
2548 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
2549 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
2550
2551 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
2552 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
2553
2554 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
2555 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
2556}
2557
2558static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
2559{
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002560 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
2561 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
2562 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002563 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002564 } else {
2565 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002566 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002567 }
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002568}
2569
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002570static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
2571{
2572 /* provide a legal default configuration */
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002573 host->testbus.select_major = TSTBUS_UNIPRO;
2574 host->testbus.select_minor = 37;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002575}
2576
Can Guo2d8e79c2018-06-25 01:05:49 -07002577bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
2578 u8 select_major, u8 select_minor)
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002579{
Can Guo2d8e79c2018-06-25 01:05:49 -07002580 if (select_major >= TSTBUS_MAX) {
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002581 dev_err(host->hba->dev,
2582			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
Can Guo2d8e79c2018-06-25 01:05:49 -07002583 __func__, select_major);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002584 return false;
2585 }
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002586 return true;
2587}
2588
Can Guo2d8e79c2018-06-25 01:05:49 -07002589/*
2590 * The caller of this function must make sure that the controller
2591 * is out of runtime suspend and appropriate clocks are enabled
2592 * before accessing.
2593 */
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002594int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
2595{
Can Guo2d8e79c2018-06-25 01:05:49 -07002596 int reg = 0;
2597 int offset = -1, ret = 0, testbus_sel_offset = 19;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002598 u32 mask = TEST_BUS_SUB_SEL_MASK;
Can Guo2d8e79c2018-06-25 01:05:49 -07002599 unsigned long flags;
2600 struct ufs_hba *hba;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002601
2602 if (!host)
2603 return -EINVAL;
Can Guo2d8e79c2018-06-25 01:05:49 -07002604 hba = host->hba;
2605 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002606 switch (host->testbus.select_major) {
2607 case TSTBUS_UAWM:
2608 reg = UFS_TEST_BUS_CTRL_0;
2609 offset = 24;
2610 break;
2611 case TSTBUS_UARM:
2612 reg = UFS_TEST_BUS_CTRL_0;
2613 offset = 16;
2614 break;
2615 case TSTBUS_TXUC:
2616 reg = UFS_TEST_BUS_CTRL_0;
2617 offset = 8;
2618 break;
2619 case TSTBUS_RXUC:
2620 reg = UFS_TEST_BUS_CTRL_0;
2621 offset = 0;
2622 break;
2623 case TSTBUS_DFC:
2624 reg = UFS_TEST_BUS_CTRL_1;
2625 offset = 24;
2626 break;
2627 case TSTBUS_TRLUT:
2628 reg = UFS_TEST_BUS_CTRL_1;
2629 offset = 16;
2630 break;
2631 case TSTBUS_TMRLUT:
2632 reg = UFS_TEST_BUS_CTRL_1;
2633 offset = 8;
2634 break;
2635 case TSTBUS_OCSC:
2636 reg = UFS_TEST_BUS_CTRL_1;
2637 offset = 0;
2638 break;
2639 case TSTBUS_WRAPPER:
2640 reg = UFS_TEST_BUS_CTRL_2;
2641 offset = 16;
2642 break;
2643 case TSTBUS_COMBINED:
2644 reg = UFS_TEST_BUS_CTRL_2;
2645 offset = 8;
2646 break;
2647 case TSTBUS_UTP_HCI:
2648 reg = UFS_TEST_BUS_CTRL_2;
2649 offset = 0;
2650 break;
2651 case TSTBUS_UNIPRO:
2652 reg = UFS_UNIPRO_CFG;
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002653 offset = 20;
2654 mask = 0xFFF;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002655 break;
2656 /*
2657 * No need for a default case, since
2658 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
2659 * is legal
2660 */
2661 }
Can Guo2d8e79c2018-06-25 01:05:49 -07002662
2663 if (offset < 0) {
2664 dev_err(hba->dev, "%s: Bad offset: %d\n", __func__, offset);
2665 ret = -EINVAL;
2666 spin_unlock_irqrestore(hba->host->host_lock, flags);
2667 goto out;
2668 }
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002669 mask <<= offset;
2670
Can Guo2d8e79c2018-06-25 01:05:49 -07002671 spin_unlock_irqrestore(hba->host->host_lock, flags);
2672 if (reg) {
2673 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
2674 (u32)host->testbus.select_major << testbus_sel_offset,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002675 REG_UFS_CFG1);
Can Guo2d8e79c2018-06-25 01:05:49 -07002676 ufshcd_rmwl(host->hba, mask,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002677 (u32)host->testbus.select_minor << offset,
2678 reg);
Can Guo2d8e79c2018-06-25 01:05:49 -07002679 } else {
2680 dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
2681 ret = -EINVAL;
2682 goto out;
2683 }
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002684 ufs_qcom_enable_test_bus(host);
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002685 /*
2686 * Make sure the test bus configuration is
2687 * committed before returning.
2688 */
2689 mb();
Can Guo2d8e79c2018-06-25 01:05:49 -07002690out:
2691 return ret;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002692}
2693
2694static void ufs_qcom_testbus_read(struct ufs_hba *hba)
2695{
Can Guo2d8e79c2018-06-25 01:05:49 -07002696 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002697}
2698
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002699static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
2700{
2701 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2702 u32 *testbus = NULL;
2703 int i, nminor = 256, testbus_len = nminor * sizeof(u32);
2704
2705 testbus = kmalloc(testbus_len, GFP_KERNEL);
2706 if (!testbus)
2707 return;
2708
2709 host->testbus.select_major = TSTBUS_UNIPRO;
2710 for (i = 0; i < nminor; i++) {
2711 host->testbus.select_minor = i;
2712 ufs_qcom_testbus_config(host);
2713 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2714 }
2715 print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
2716 16, 4, testbus, testbus_len, false);
2717 kfree(testbus);
2718}
2719
Can Guo2d8e79c2018-06-25 01:05:49 -07002720static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba)
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002721{
Can Guo2d8e79c2018-06-25 01:05:49 -07002722 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2723 u32 *testbus = NULL;
2724 int i, nminor = 32, testbus_len = nminor * sizeof(u32);
2725
2726 testbus = kmalloc(testbus_len, GFP_KERNEL);
2727 if (!testbus)
2728 return;
2729
2730 host->testbus.select_major = TSTBUS_UTP_HCI;
2731 for (i = 0; i < nminor; i++) {
2732 host->testbus.select_minor = i;
2733 ufs_qcom_testbus_config(host);
2734 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2735 }
2736 print_hex_dump(KERN_ERR, "UTP_HCI_TEST_BUS ", DUMP_PREFIX_OFFSET,
2737 16, 4, testbus, testbus_len, false);
2738 kfree(testbus);
2739}
2740
2741static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
2742{
2743 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2744 struct phy *phy = host->generic_phy;
2745
2746 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
2747 "HCI Vendor Specific Registers ");
2748 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
2749
2750 if (no_sleep)
2751 return;
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002752
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002753 /* sleep a bit intermittently as we are dumping too much data */
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002754 usleep_range(1000, 1100);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002755 ufs_qcom_testbus_read(hba);
Venkat Gopalakrishnan9c46b862017-02-03 16:58:12 -08002756 usleep_range(1000, 1100);
2757 ufs_qcom_print_unipro_testbus(hba);
2758 usleep_range(1000, 1100);
Can Guo2d8e79c2018-06-25 01:05:49 -07002759 ufs_qcom_print_utp_hci_testbus(hba);
2760 usleep_range(1000, 1100);
2761 ufs_qcom_phy_dbg_register_dump(phy);
2762 usleep_range(1000, 1100);
2763 ufs_qcom_ice_print_regs(host);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002764}
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002765
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002766/**
2767 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
2768 *
2769 * The variant operations configure the necessary controller and PHY
2770 * handshake during initialization.
2771 */
Yaniv Gardi47555a52015-10-28 13:15:49 +02002772static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002773 .init = ufs_qcom_init,
2774 .exit = ufs_qcom_exit,
Yaniv Gardiae977582015-05-17 18:55:06 +03002775 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002776 .clk_scale_notify = ufs_qcom_clk_scale_notify,
2777 .setup_clocks = ufs_qcom_setup_clocks,
2778 .hce_enable_notify = ufs_qcom_hce_enable_notify,
2779 .link_startup_notify = ufs_qcom_link_startup_notify,
2780 .pwr_change_notify = ufs_qcom_pwr_change_notify,
Subhash Jadavani56d4a182016-12-05 19:25:32 -08002781 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002782 .suspend = ufs_qcom_suspend,
2783 .resume = ufs_qcom_resume,
Can Guo2d8e79c2018-06-25 01:05:49 -07002784 .full_reset = ufs_qcom_full_reset,
2785 .update_sec_cfg = ufs_qcom_update_sec_cfg,
2786 .get_scale_down_gear = ufs_qcom_get_scale_down_gear,
2787 .set_bus_vote = ufs_qcom_set_bus_vote,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002788 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
Can Guo2d8e79c2018-06-25 01:05:49 -07002789#ifdef CONFIG_DEBUG_FS
2790 .add_debugfs = ufs_qcom_dbg_add_debugfs,
2791#endif
2792};
2793
2794static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
2795 .crypto_req_setup = ufs_qcom_crypto_req_setup,
2796 .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
2797 .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
2798 .crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
2799 .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
2800};
2801
2802static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
2803 .req_start = ufs_qcom_pm_qos_req_start,
2804 .req_end = ufs_qcom_pm_qos_req_end,
2805};
2806
2807static struct ufs_hba_variant ufs_hba_qcom_variant = {
2808 .name = "qcom",
2809 .vops = &ufs_hba_qcom_vops,
2810 .crypto_vops = &ufs_hba_crypto_variant_ops,
2811 .pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002812};
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002813
Yaniv Gardi47555a52015-10-28 13:15:49 +02002814/**
2815 * ufs_qcom_probe - probe routine of the driver
2816 * @pdev: pointer to Platform device handle
2817 *
2818 * Return zero for success and non-zero for failure
2819 */
2820static int ufs_qcom_probe(struct platform_device *pdev)
2821{
2822 int err;
2823 struct device *dev = &pdev->dev;
Can Guo2d8e79c2018-06-25 01:05:49 -07002824 struct device_node *np = dev->of_node;
2825
2826 /*
2827	 * On qcom platforms, the boot device is the primary storage
2828	 * device and can be either eMMC or UFS.
2829	 * The type of the connected device is detected at runtime.
2830	 * So, if an eMMC device is connected and this function is
2831	 * invoked, it would turn off the regulators once it detects
2832	 * that the storage device is not UFS.
2833	 * These regulators are turned on by the bootloader, and turning
2834	 * them off without sending PON may damage the connected device.
2835	 * Hence, check for the connected device early on and don't turn
2836	 * off the regulators.
2837 */
2838 if (of_property_read_bool(np, "non-removable") &&
2839 strlen(android_boot_dev) &&
2840 strcmp(android_boot_dev, dev_name(dev)))
2841 return -ENODEV;
Yaniv Gardi47555a52015-10-28 13:15:49 +02002842
2843 /* Perform generic probe */
Can Guo2d8e79c2018-06-25 01:05:49 -07002844 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002845 if (err)
2846 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
2847
2848 return err;
2849}
2850
2851/**
2852 * ufs_qcom_remove - set driver_data of the device to NULL
2853 * @pdev: pointer to platform device handle
2854 *
Yaniv Gardi4b9ad0b2016-03-10 17:37:19 +02002855 * Always returns 0
Yaniv Gardi47555a52015-10-28 13:15:49 +02002856 */
2857static int ufs_qcom_remove(struct platform_device *pdev)
2858{
2859 struct ufs_hba *hba = platform_get_drvdata(pdev);
2860
2861 pm_runtime_get_sync(&(pdev)->dev);
2862 ufshcd_remove(hba);
2863 return 0;
2864}
2865
2866static const struct of_device_id ufs_qcom_of_match[] = {
2867 { .compatible = "qcom,ufshc"},
2868 {},
2869};
Javier Martinez Canillasab3dabb2017-01-02 11:04:58 -03002870MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002871
2872static const struct dev_pm_ops ufs_qcom_pm_ops = {
2873 .suspend = ufshcd_pltfrm_suspend,
2874 .resume = ufshcd_pltfrm_resume,
2875 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
2876 .runtime_resume = ufshcd_pltfrm_runtime_resume,
2877 .runtime_idle = ufshcd_pltfrm_runtime_idle,
2878};
2879
2880static struct platform_driver ufs_qcom_pltform = {
2881 .probe = ufs_qcom_probe,
2882 .remove = ufs_qcom_remove,
2883 .shutdown = ufshcd_pltfrm_shutdown,
2884 .driver = {
2885 .name = "ufshcd-qcom",
2886 .pm = &ufs_qcom_pm_ops,
2887 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2888 },
2889};
2890module_platform_driver(ufs_qcom_pltform);
2891
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002892MODULE_LICENSE("GPL v2");