Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001/*
Yaniv Gardi54b879b2016-03-10 17:37:05 +02002 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/time.h>
16#include <linux/of.h>
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070017#include <linux/iopoll.h>
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020018#include <linux/platform_device.h>
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070019
20#ifdef CONFIG_QCOM_BUS_SCALING
21#include <linux/msm-bus.h>
22#endif
23
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020024#include <linux/phy/phy.h>
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020025#include <linux/phy/phy-qcom-ufs.h>
Yaniv Gardi4b9ad0b2016-03-10 17:37:19 +020026
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020027#include "ufshcd.h"
Yaniv Gardi47555a52015-10-28 13:15:49 +020028#include "ufshcd-pltfrm.h"
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020029#include "unipro.h"
30#include "ufs-qcom.h"
31#include "ufshci.h"
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070032#include "ufs_quirks.h"
33#include "ufs-qcom-ice.h"
34#include "ufs-qcom-debugfs.h"
35
36/* TODO: further tuning for this parameter may be required */
37#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
38
Yaniv Gardi6e3fd442015-10-28 13:15:50 +020039#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
40 (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
41
42enum {
43 TSTBUS_UAWM,
44 TSTBUS_UARM,
45 TSTBUS_TXUC,
46 TSTBUS_RXUC,
47 TSTBUS_DFC,
48 TSTBUS_TRLUT,
49 TSTBUS_TMRLUT,
50 TSTBUS_OCSC,
51 TSTBUS_UTP_HCI,
52 TSTBUS_COMBINED,
53 TSTBUS_WRAPPER,
54 TSTBUS_UNIPRO,
55 TSTBUS_MAX,
56};
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020057
58static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
59
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070060static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +020061static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
Yaniv Gardif06fcc72015-10-28 13:15:51 +020062static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
63 u32 clk_cycles);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070064static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
Yaniv Gardif06fcc72015-10-28 13:15:51 +020065
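/*
 * Descriptive note (added for clarity): the helper below dumps 'len' 32-bit
 * registers starting at 'offset' within the UFS HCI register space, using
 * print_hex_dump() with 4-byte groups and 16 bytes per row.
 */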
Yaniv Gardi6e3fd442015-10-28 13:15:50 +020066static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
67 char *prefix)
68{
69 print_hex_dump(KERN_ERR, prefix,
70 len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070071 16, 4, hba->mmio_base + offset, len * 4, false);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +020072}
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020073
Yaniv Gardieba5ed32016-03-10 17:37:21 +020074static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
75 char *prefix, void *priv)
76{
77 ufs_qcom_dump_regs(hba, offset, len, prefix);
78}
79
Yaniv Gardi81c0fc52015-01-15 16:32:37 +020080static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
81{
82 int err = 0;
83
84 err = ufshcd_dme_get(hba,
85 UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
86 if (err)
87 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
88 __func__, err);
89
90 return err;
91}
92
93static int ufs_qcom_host_clk_get(struct device *dev,
94 const char *name, struct clk **clk_out)
95{
96 struct clk *clk;
97 int err = 0;
98
99 clk = devm_clk_get(dev, name);
100 if (IS_ERR(clk)) {
101 err = PTR_ERR(clk);
102 dev_err(dev, "%s: failed to get %s err %d",
103 __func__, name, err);
104 } else {
105 *clk_out = clk;
106 }
107
108 return err;
109}
110
111static int ufs_qcom_host_clk_enable(struct device *dev,
112 const char *name, struct clk *clk)
113{
114 int err = 0;
115
116 err = clk_prepare_enable(clk);
117 if (err)
118 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
119
120 return err;
121}
122
123static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
124{
125 if (!host->is_lane_clks_enabled)
126 return;
127
Venkat Gopalakrishnanac795e72016-08-18 16:58:20 -0700128 if (host->tx_l1_sync_clk)
Yaniv Gardi4b9ad0b2016-03-10 17:37:19 +0200129 clk_disable_unprepare(host->tx_l1_sync_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200130 clk_disable_unprepare(host->tx_l0_sync_clk);
Venkat Gopalakrishnanac795e72016-08-18 16:58:20 -0700131 if (host->rx_l1_sync_clk)
Yaniv Gardi4b9ad0b2016-03-10 17:37:19 +0200132 clk_disable_unprepare(host->rx_l1_sync_clk);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200133 clk_disable_unprepare(host->rx_l0_sync_clk);
134
135 host->is_lane_clks_enabled = false;
136}
137
138static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
139{
140 int err = 0;
141 struct device *dev = host->hba->dev;
142
143 if (host->is_lane_clks_enabled)
144 return 0;
145
146 err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
147 host->rx_l0_sync_clk);
148 if (err)
149 goto out;
150
151 err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
152 host->tx_l0_sync_clk);
153 if (err)
154 goto disable_rx_l0;
155
Yaniv Gardi54b879b2016-03-10 17:37:05 +0200156 if (host->hba->lanes_per_direction > 1) {
157 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
158 host->rx_l1_sync_clk);
159 if (err)
160 goto disable_tx_l0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200161
Venkat Gopalakrishnanac795e72016-08-18 16:58:20 -0700162 /* The tx lane1 clk could be muxed, hence keep this optional */
163 if (host->tx_l1_sync_clk)
164 ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
165 host->tx_l1_sync_clk);
Yaniv Gardi54b879b2016-03-10 17:37:05 +0200166 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200167 host->is_lane_clks_enabled = true;
168 goto out;
169
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200170disable_tx_l0:
171 clk_disable_unprepare(host->tx_l0_sync_clk);
172disable_rx_l0:
173 clk_disable_unprepare(host->rx_l0_sync_clk);
174out:
175 return err;
176}
177
178static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
179{
180 int err = 0;
181 struct device *dev = host->hba->dev;
182
183 err = ufs_qcom_host_clk_get(dev,
184 "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
185 if (err)
186 goto out;
187
188 err = ufs_qcom_host_clk_get(dev,
189 "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
190 if (err)
191 goto out;
192
Yaniv Gardi54b879b2016-03-10 17:37:05 +0200193 /* In case of single lane per direction, don't read lane1 clocks */
194 if (host->hba->lanes_per_direction > 1) {
195 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
196 &host->rx_l1_sync_clk);
197 if (err)
198 goto out;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200199
Venkat Gopalakrishnanac795e72016-08-18 16:58:20 -0700200 /* The tx lane1 clk could be muxed, hence keep this optional */
201 ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
202 &host->tx_l1_sync_clk);
Yaniv Gardi54b879b2016-03-10 17:37:05 +0200203 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200204out:
205 return err;
206}
207
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200208static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
209{
210 int err;
211 u32 tx_fsm_val = 0;
212 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
213
214 do {
215 err = ufshcd_dme_get(hba,
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200216 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
217 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
218 &tx_fsm_val);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200219 if (err || tx_fsm_val == TX_FSM_HIBERN8)
220 break;
221
222 /* sleep for max. 200us */
223 usleep_range(100, 200);
224 } while (time_before(jiffies, timeout));
225
226 /*
227 * we might have been scheduled out for a long time while polling, so
228 * check the state again.
229 */
230 if (time_after(jiffies, timeout))
231 err = ufshcd_dme_get(hba,
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200232 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
233 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
234 &tx_fsm_val);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200235
236 if (err) {
237 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
238 __func__, err);
239 } else if (tx_fsm_val != TX_FSM_HIBERN8) {
240 err = tx_fsm_val;
241 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
242 __func__, err);
243 }
244
245 return err;
246}
247
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200248static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
249{
250 ufshcd_rmwl(host->hba, QUNIPRO_SEL,
251 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
252 REG_UFS_CFG1);
253 /* make sure above configuration is applied before we return */
254 mb();
255}
256
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200257static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
258{
Yaniv Gardi1ce58982015-10-28 13:15:47 +0200259 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200260 struct phy *phy = host->generic_phy;
261 int ret = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200262 bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
263 ? true : false;
264
265 /* Assert PHY reset and apply PHY calibration values */
266 ufs_qcom_assert_reset(hba);
267 /* provide 1ms delay to let the reset pulse propagate */
268 usleep_range(1000, 1100);
269
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200270 ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200271
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200272 if (ret) {
Yaniv Gardi4b9ad0b2016-03-10 17:37:19 +0200273 dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
274 __func__, ret);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200275 goto out;
276 }
277
278 /* De-assert PHY reset and start serdes */
279 ufs_qcom_deassert_reset(hba);
280
281 /*
282 * After reset deassertion, the PHY needs all ref clocks, voltage
283 * and current to settle down before the serdes is started.
284 */
285 usleep_range(1000, 1100);
286 ret = ufs_qcom_phy_start_serdes(phy);
287 if (ret) {
288 dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
289 __func__, ret);
290 goto out;
291 }
292
293 ret = ufs_qcom_phy_is_pcs_ready(phy);
294 if (ret)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700295 dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200296 __func__, ret);
297
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200298 ufs_qcom_select_unipro_mode(host);
299
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200300out:
301 return ret;
302}
303
304/*
305 * The UTP controller has a number of internal clock gating cells (CGCs).
306 * Internal hardware sub-modules within the UTP controller control the CGCs.
307 * Hardware CGCs disable the clock to inactivate UTP sub-modules that are not
308 * involved in a specific operation. The UTP controller CGCs are disabled by
309 * default, and this function enables them (after every UFS link startup) to
310 * save some leakage power.
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700311 *
312 * UFS host controller v3.0.0 onwards has an internal clock gating mechanism
313 * in Qunipro; enable it to save additional power.
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200314 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700315static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200316{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700317 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
318 int err = 0;
319
320 /* Enable UTP internal clock gating */
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200321 ufshcd_writel(hba,
322 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
323 REG_UFS_CFG2);
324
325 /* Ensure that HW clock gating is enabled before next operations */
326 mb();
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700327
328 /* Enable Qunipro internal clock gating if supported */
329 if (!ufs_qcom_cap_qunipro_clk_gating(host))
330 goto out;
331
332 /* Enable all the mask bits */
333 err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
334 DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
335 if (err)
336 goto out;
337
338 err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
339 PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
340 if (err)
341 goto out;
342
343 err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
344 DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
345 DME_VS_CORE_CLK_CTRL);
346out:
347 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200348}
349
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200350static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
351 enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200352{
Yaniv Gardi1ce58982015-10-28 13:15:47 +0200353 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200354 int err = 0;
355
356 switch (status) {
357 case PRE_CHANGE:
358 ufs_qcom_power_up_sequence(hba);
359 /*
360 * The PHY PLL output is the source of tx/rx lane symbol
361 * clocks, hence, enable the lane clocks only after PHY
362 * is initialized.
363 */
364 err = ufs_qcom_enable_lane_clks(host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700365 if (!err && host->ice.pdev) {
366 err = ufs_qcom_ice_init(host);
367 if (err) {
368 dev_err(hba->dev, "%s: ICE init failed (%d)\n",
369 __func__, err);
370 err = -EINVAL;
371 }
372 }
373
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200374 break;
375 case POST_CHANGE:
376 /* check if UFS PHY moved from DISABLED to HIBERN8 */
377 err = ufs_qcom_check_hibern8(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200378 break;
379 default:
380 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
381 err = -EINVAL;
382 break;
383 }
384 return err;
385}
386
387/**
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200388 * Returns zero for success and non-zero in case of a failure
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200389 */
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200390static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
391 u32 hs, u32 rate, bool update_link_startup_timer)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200392{
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200393 int ret = 0;
Yaniv Gardi1ce58982015-10-28 13:15:47 +0200394 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200395 struct ufs_clk_info *clki;
396 u32 core_clk_period_in_ns;
397 u32 tx_clk_cycles_per_us = 0;
398 unsigned long core_clk_rate = 0;
399 u32 core_clk_cycles_per_us = 0;
400
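 /*
 * Descriptive note (added for clarity): each {x, y} pair below maps a gear to
 * the tx_clk_cycles_per_us value that is programmed into
 * REG_UFS_TX_SYMBOL_CLK_NS_US for that gear, e.g. HS-G1 rate A uses
 * 0x1F = 31 TX symbol clock cycles per microsecond.
 */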
401 static u32 pwm_fr_table[][2] = {
402 {UFS_PWM_G1, 0x1},
403 {UFS_PWM_G2, 0x1},
404 {UFS_PWM_G3, 0x1},
405 {UFS_PWM_G4, 0x1},
406 };
407
408 static u32 hs_fr_table_rA[][2] = {
409 {UFS_HS_G1, 0x1F},
410 {UFS_HS_G2, 0x3e},
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200411 {UFS_HS_G3, 0x7D},
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200412 };
413
414 static u32 hs_fr_table_rB[][2] = {
415 {UFS_HS_G1, 0x24},
416 {UFS_HS_G2, 0x49},
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200417 {UFS_HS_G3, 0x92},
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200418 };
419
Yaniv Gardi81c7e062015-05-17 18:54:58 +0300420 /*
421 * The Qunipro controller does not use the following registers:
422 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
423 * UFS_REG_PA_LINK_STARTUP_TIMER
424 * But the UTP controller uses the SYS1CLK_1US_REG register for Interrupt
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700425 * Aggregation / Auto hibern8 logic.
Yaniv Gardi81c7e062015-05-17 18:54:58 +0300426 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700427 if (ufs_qcom_cap_qunipro(host) &&
428 (!(ufshcd_is_intr_aggr_allowed(hba) ||
429 ufshcd_is_auto_hibern8_supported(hba))))
Yaniv Gardi81c7e062015-05-17 18:54:58 +0300430 goto out;
431
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200432 if (gear == 0) {
433 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
434 goto out_error;
435 }
436
437 list_for_each_entry(clki, &hba->clk_list_head, list) {
438 if (!strcmp(clki->name, "core_clk"))
439 core_clk_rate = clk_get_rate(clki->clk);
440 }
441
442 /* If frequency is smaller than 1MHz, set to 1MHz */
443 if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
444 core_clk_rate = DEFAULT_CLK_RATE_HZ;
445
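 /*
 * Worked example (illustrative figure, not from the source): with a 150 MHz
 * core clock, core_clk_cycles_per_us = 150000000 / 1000000 = 150, and that
 * value is what gets programmed into REG_UFS_SYS1CLK_1US below.
 */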
446 core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200447 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
448 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
449 /*
450 * make sure above write gets applied before we return from
451 * this function.
452 */
453 mb();
454 }
455
456 if (ufs_qcom_cap_qunipro(host))
457 goto out;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200458
459 core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
460 core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
461 core_clk_period_in_ns &= MASK_CLK_NS_REG;
462
463 switch (hs) {
464 case FASTAUTO_MODE:
465 case FAST_MODE:
466 if (rate == PA_HS_MODE_A) {
467 if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
468 dev_err(hba->dev,
469 "%s: index %d exceeds table size %zu\n",
470 __func__, gear,
471 ARRAY_SIZE(hs_fr_table_rA));
472 goto out_error;
473 }
474 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
475 } else if (rate == PA_HS_MODE_B) {
476 if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
477 dev_err(hba->dev,
478 "%s: index %d exceeds table size %zu\n",
479 __func__, gear,
480 ARRAY_SIZE(hs_fr_table_rB));
481 goto out_error;
482 }
483 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
484 } else {
485 dev_err(hba->dev, "%s: invalid rate = %d\n",
486 __func__, rate);
487 goto out_error;
488 }
489 break;
490 case SLOWAUTO_MODE:
491 case SLOW_MODE:
492 if (gear > ARRAY_SIZE(pwm_fr_table)) {
493 dev_err(hba->dev,
494 "%s: index %d exceeds table size %zu\n",
495 __func__, gear,
496 ARRAY_SIZE(pwm_fr_table));
497 goto out_error;
498 }
499 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
500 break;
501 case UNCHANGED:
502 default:
503 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
504 goto out_error;
505 }
506
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200507 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
508 (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
509 /* the two fields of this register shall be written at once */
510 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
511 REG_UFS_TX_SYMBOL_CLK_NS_US);
512 /*
513 * make sure above write gets applied before we return from
514 * this function.
515 */
516 mb();
517 }
518
519 if (update_link_startup_timer) {
520 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
521 REG_UFS_PA_LINK_STARTUP_TIMER);
522 /*
523 * make sure that this configuration is applied before
524 * we return
525 */
526 mb();
527 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200528 goto out;
529
530out_error:
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200531 ret = -EINVAL;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200532out:
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200533 return ret;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200534}
535
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700536static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
537{
538 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
539 struct phy *phy = host->generic_phy;
540 u32 unipro_ver;
541 int err = 0;
542
543 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
544 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
545 __func__);
546 err = -EINVAL;
547 goto out;
548 }
549
550 /* make sure RX LineCfg is enabled before link startup */
551 err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
552 if (err)
553 goto out;
554
555 if (ufs_qcom_cap_qunipro(host)) {
556 /*
557 * set unipro core clock cycles to 150 & clear clock divider
558 */
559 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
560 if (err)
561 goto out;
562 }
563
564 err = ufs_qcom_enable_hw_clk_gating(hba);
565 if (err)
566 goto out;
567
568 /*
569 * Some UFS devices (and possibly the host) have issues if LCC is
570 * enabled. So we set PA_Local_TX_LCC_Enable to 0 before link startup,
571 * which makes sure that both host and device TX LCC are disabled
572 * once link startup is
573 * completed.
574 */
575 unipro_ver = ufshcd_get_local_unipro_ver(hba);
576 if (unipro_ver != UFS_UNIPRO_VER_1_41)
577 err = ufshcd_dme_set(hba,
578 UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
579 0);
580 if (err)
581 goto out;
582
583 if (!ufs_qcom_cap_qunipro_clk_gating(host))
584 goto out;
585
586 /* Enable all the mask bits */
587 err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
588 SAVECONFIGTIME_MODE_MASK,
589 PA_VS_CONFIG_REG1);
590out:
591 return err;
592}
593
594static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
595{
596 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
597 struct phy *phy = host->generic_phy;
598 u32 tx_lanes;
599 int err = 0;
600
601 err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
602 if (err)
603 goto out;
604
605 err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
606 if (err) {
607 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
608 __func__);
609 goto out;
610 }
611
612 /*
613 * Some UFS devices send incorrect LineCfg data as part of the power mode
614 * change sequence, which may cause the host PHY to go into a bad state.
615 * Disabling Rx LineCfg of the host PHY should help avoid this.
616 */
617 if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
618 err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
619 if (err) {
620 dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
621 __func__);
622 goto out;
623 }
624
625 /*
626 * The UFS controller has a *clk_req output to GCC for each one of the clocks
627 * entering it. When *clk_req for a specific clock is de-asserted,
628 * the corresponding clock from GCC is stopped. The UFS controller de-asserts
629 * *clk_req outputs when it is in Auto Hibernate state only if the
630 * Clock request feature is enabled.
631 * Enable the Clock request feature:
632 * - Enable HW clock control for UFS clocks in GCC (handled by the
633 * clock driver as part of clk_prepare_enable).
634 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
635 */
636 if (ufshcd_is_auto_hibern8_supported(hba))
637 ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
638 UFS_HW_CLK_CTRL_EN,
639 UFS_AH8_CFG);
640 /*
641 * Make sure clock request feature gets enabled for HW clk gating
642 * before further operations.
643 */
644 mb();
645
646out:
647 return err;
648}
649
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200650static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
651 enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200652{
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200653 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200654
655 switch (status) {
656 case PRE_CHANGE:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700657 err = ufs_qcom_link_startup_pre_change(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200658 break;
659 case POST_CHANGE:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700660 err = ufs_qcom_link_startup_post_change(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200661 break;
662 default:
663 break;
664 }
665
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200666 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200667}
668
669static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
670{
Yaniv Gardi1ce58982015-10-28 13:15:47 +0200671 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200672 struct phy *phy = host->generic_phy;
673 int ret = 0;
674
675 if (ufs_qcom_is_link_off(hba)) {
676 /*
677 * Disable the tx/rx lane symbol clocks before PHY is
678 * powered down as the PLL source should be disabled
679 * after downstream clocks are disabled.
680 */
681 ufs_qcom_disable_lane_clks(host);
682 phy_power_off(phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700683 ret = ufs_qcom_ice_suspend(host);
684 if (ret)
685 dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
686 __func__, ret);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200687
688 /* Assert PHY soft reset */
689 ufs_qcom_assert_reset(hba);
690 goto out;
691 }
692
693 /*
694 * If UniPro link is not active, PHY ref_clk, main PHY analog power
695 * rail and low noise analog power rail for PLL can be switched off.
696 */
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200697 if (!ufs_qcom_is_link_active(hba)) {
698 ufs_qcom_disable_lane_clks(host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200699 phy_power_off(phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700700 ufs_qcom_ice_suspend(host);
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200701 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200702
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700703 /* Unvote PM QoS */
704 ufs_qcom_pm_qos_suspend(host);
705
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200706out:
707 return ret;
708}
709
710static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
711{
Yaniv Gardi1ce58982015-10-28 13:15:47 +0200712 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200713 struct phy *phy = host->generic_phy;
714 int err;
715
716 err = phy_power_on(phy);
717 if (err) {
718 dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
719 __func__, err);
720 goto out;
721 }
722
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200723 err = ufs_qcom_enable_lane_clks(host);
724 if (err)
725 goto out;
726
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700727 err = ufs_qcom_ice_resume(host);
728 if (err) {
729 dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
730 __func__, err);
731 goto out;
732 }
733
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200734 hba->is_sys_suspended = false;
735
736out:
737 return err;
738}
739
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700740static int ufs_qcom_full_reset(struct ufs_hba *hba)
741{
742 return -ENOTSUPP;
743}
744
745#ifdef CONFIG_SCSI_UFS_QCOM_ICE
746static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
747 struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
748{
749 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
750 struct request *req;
751 int ret;
752
753 if (lrbp->cmd && lrbp->cmd->request)
754 req = lrbp->cmd->request;
755 else
756 return 0;
757
758 /* Use request LBA as the DUN value */
759 if (req->bio)
760 *dun = req->bio->bi_iter.bi_sector;
761
762 ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
763
764 return ret;
765}
766
767static
768int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
769{
770 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
771 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
772 int err = 0;
773
774 if (!host->ice.pdev ||
775 !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
776 goto out;
777
778 err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
779out:
780 return err;
781}
782
783static
784int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
785 struct ufshcd_lrb *lrbp, struct request *req)
786{
787 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
788 int err = 0;
789
790 if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
791 goto out;
792
793 err = ufs_qcom_ice_cfg_end(host, req);
794out:
795 return err;
796}
797
798static
799int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
800{
801 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
802 int err = 0;
803
804 if (!host->ice.pdev)
805 goto out;
806
807 err = ufs_qcom_ice_reset(host);
808out:
809 return err;
810}
811
812static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
813{
814 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
815
816 if (!status)
817 return -EINVAL;
818
819 return ufs_qcom_ice_get_status(host, status);
820}
821#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
822#define ufs_qcom_crypto_req_setup NULL
823#define ufs_qcom_crytpo_engine_cfg_start NULL
824#define ufs_qcom_crytpo_engine_cfg_end NULL
825#define ufs_qcom_crytpo_engine_reset NULL
826#define ufs_qcom_crypto_engine_get_status NULL
827#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
828
Yaniv Gardi81c0fc52015-01-15 16:32:37 +0200829struct ufs_qcom_dev_params {
830 u32 pwm_rx_gear; /* pwm rx gear to work in */
831 u32 pwm_tx_gear; /* pwm tx gear to work in */
832 u32 hs_rx_gear; /* hs rx gear to work in */
833 u32 hs_tx_gear; /* hs tx gear to work in */
834 u32 rx_lanes; /* number of rx lanes */
835 u32 tx_lanes; /* number of tx lanes */
836 u32 rx_pwr_pwm; /* rx pwm working pwr */
837 u32 tx_pwr_pwm; /* tx pwm working pwr */
838 u32 rx_pwr_hs; /* rx hs working pwr */
839 u32 tx_pwr_hs; /* tx hs working pwr */
840 u32 hs_rate; /* rate A/B to work in HS */
841 u32 desired_working_mode;
842};
843
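/*
 * Illustrative example of the negotiation done below: if the device
 * advertises FAST_MODE (HS) up to gear 3 in both directions while the vendor
 * capabilities (qcom_param) also ask for HS but cap the gear at 2, the agreed
 * settings are HS at gear 2 (the minimum of the two) and the minimum of the
 * device-supported and vendor-preferred lane counts.
 */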
844static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
845 struct ufs_pa_layer_attr *dev_max,
846 struct ufs_pa_layer_attr *agreed_pwr)
847{
848 int min_qcom_gear;
849 int min_dev_gear;
850 bool is_dev_sup_hs = false;
851 bool is_qcom_max_hs = false;
852
853 if (dev_max->pwr_rx == FAST_MODE)
854 is_dev_sup_hs = true;
855
856 if (qcom_param->desired_working_mode == FAST) {
857 is_qcom_max_hs = true;
858 min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
859 qcom_param->hs_tx_gear);
860 } else {
861 min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
862 qcom_param->pwm_tx_gear);
863 }
864
865 /*
866 * device doesn't support HS but qcom_param->desired_working_mode is
867 * HS, thus device and qcom_param don't agree
868 */
869 if (!is_dev_sup_hs && is_qcom_max_hs) {
870 pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
871 __func__);
872 return -ENOTSUPP;
873 } else if (is_dev_sup_hs && is_qcom_max_hs) {
874 /*
875 * since device supports HS, it supports FAST_MODE.
876 * since qcom_param->desired_working_mode is also HS
877 * then final decision (FAST/FASTAUTO) is done according
878 * to qcom_params as it is the restricting factor
879 */
880 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
881 qcom_param->rx_pwr_hs;
882 } else {
883 /*
884 * here qcom_param->desired_working_mode is PWM.
885 * it doesn't matter whether device supports HS or PWM,
886 * in both cases qcom_param->desired_working_mode will
887 * determine the mode
888 */
889 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
890 qcom_param->rx_pwr_pwm;
891 }
892
893 /*
894 * we would like tx to work in the minimum number of lanes
895 * between device capability and vendor preferences.
896 * the same decision will be made for rx
897 */
898 agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
899 qcom_param->tx_lanes);
900 agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
901 qcom_param->rx_lanes);
902
903 /* device maximum gear is the minimum between device rx and tx gears */
904 min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
905
906 /*
907 * If the device capabilities and the vendor pre-defined preferences are
908 * both HS or both PWM, then set the minimum gear as the chosen
909 * working gear.
910 * If one is PWM and one is HS, then the PWM side gets to decide
911 * the gear, as it is the one that also decided previously which
912 * pwr the device will be configured to.
913 */
914 if ((is_dev_sup_hs && is_qcom_max_hs) ||
915 (!is_dev_sup_hs && !is_qcom_max_hs))
916 agreed_pwr->gear_rx = agreed_pwr->gear_tx =
917 min_t(u32, min_dev_gear, min_qcom_gear);
918 else if (!is_dev_sup_hs)
919 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
920 else
921 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
922
923 agreed_pwr->hs_rate = qcom_param->hs_rate;
924 return 0;
925}
926
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700927#ifdef CONFIG_QCOM_BUS_SCALING
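/*
 * Illustrative DT snippet (the entries are assumptions; only the property
 * name and the special "MIN"/"MAX" names come from the code below):
 *
 * qcom,bus-vector-names = "MIN", "PWM_G1_L1", "HS_RB_G3_L2", "MAX";
 *
 * The index of the matching name is returned and later used as the bus
 * scaling vote.
 */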
Yaniv Gardif06fcc72015-10-28 13:15:51 +0200928static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
929 const char *speed_mode)
930{
931 struct device *dev = host->hba->dev;
932 struct device_node *np = dev->of_node;
933 int err;
934 const char *key = "qcom,bus-vector-names";
935
936 if (!speed_mode) {
937 err = -EINVAL;
938 goto out;
939 }
940
941 if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
942 err = of_property_match_string(np, key, "MAX");
943 else
944 err = of_property_match_string(np, key, speed_mode);
945
946out:
947 if (err < 0)
948 dev_err(dev, "%s: Invalid %s mode %d\n",
949 __func__, speed_mode, err);
950 return err;
951}
952
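/*
 * Descriptive note (added for clarity): based on the snprintf() formats
 * below, a link running HS rate B, gear 3, 2 lanes yields "HS_RB_G3_L2",
 * a PWM gear 1 single-lane link yields "PWM_G1_L1", and an uninitialized
 * power mode yields "MIN". These names are matched against
 * "qcom,bus-vector-names" in ufs_qcom_get_bus_vote().
 */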
953static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
954{
955 int gear = max_t(u32, p->gear_rx, p->gear_tx);
956 int lanes = max_t(u32, p->lane_rx, p->lane_tx);
957 int pwr;
958
959 /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
960 if (!gear)
961 gear = 1;
962
963 if (!lanes)
964 lanes = 1;
965
966 if (!p->pwr_rx && !p->pwr_tx) {
967 pwr = SLOWAUTO_MODE;
968 snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
969 } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
970 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
971 pwr = FAST_MODE;
972 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
973 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
974 } else {
975 pwr = SLOW_MODE;
976 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
977 "PWM", gear, lanes);
978 }
979}
980
981static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
982{
983 int err = 0;
984
985 if (vote != host->bus_vote.curr_vote) {
986 err = msm_bus_scale_client_update_request(
987 host->bus_vote.client_handle, vote);
988 if (err) {
989 dev_err(host->hba->dev,
990 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
991 __func__, host->bus_vote.client_handle,
992 vote, err);
993 goto out;
994 }
995
996 host->bus_vote.curr_vote = vote;
997 }
998out:
999 return err;
1000}
1001
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001002static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
1003{
1004 int vote;
1005 int err = 0;
1006 char mode[BUS_VECTOR_NAME_LEN];
1007
1008 ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
1009
1010 vote = ufs_qcom_get_bus_vote(host, mode);
1011 if (vote >= 0)
1012 err = ufs_qcom_set_bus_vote(host, vote);
1013 else
1014 err = vote;
1015
1016 if (err)
1017 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
1018 else
1019 host->bus_vote.saved_vote = vote;
1020 return err;
1021}
1022
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001023static ssize_t
1024show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
1025 char *buf)
1026{
1027 struct ufs_hba *hba = dev_get_drvdata(dev);
1028 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1029
1030 return snprintf(buf, PAGE_SIZE, "%u\n",
1031 host->bus_vote.is_max_bw_needed);
1032}
1033
1034static ssize_t
1035store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
1036 const char *buf, size_t count)
1037{
1038 struct ufs_hba *hba = dev_get_drvdata(dev);
1039 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1040 uint32_t value;
1041
1042 if (!kstrtou32(buf, 0, &value)) {
1043 host->bus_vote.is_max_bw_needed = !!value;
1044 ufs_qcom_update_bus_bw_vote(host);
1045 }
1046
1047 return count;
1048}
1049
1050static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
1051{
1052 int err;
1053 struct msm_bus_scale_pdata *bus_pdata;
1054 struct device *dev = host->hba->dev;
1055 struct platform_device *pdev = to_platform_device(dev);
1056 struct device_node *np = dev->of_node;
1057
1058 bus_pdata = msm_bus_cl_get_pdata(pdev);
1059 if (!bus_pdata) {
1060 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
1061 err = -ENODATA;
1062 goto out;
1063 }
1064
1065 err = of_property_count_strings(np, "qcom,bus-vector-names");
1066 if (err < 0 || err != bus_pdata->num_usecases) {
1067 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
1068 __func__, err);
1069 goto out;
1070 }
1071
1072 host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
1073 if (!host->bus_vote.client_handle) {
1074 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
1075 __func__);
1076 err = -EFAULT;
1077 goto out;
1078 }
1079
1080 /* cache the vote index for minimum and maximum bandwidth */
1081 host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
1082 host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
1083
1084 host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
1085 host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
1086 sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
1087 host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
1088 host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
1089 err = device_create_file(dev, &host->bus_vote.max_bus_bw);
1090out:
1091 return err;
1092}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001093#else /* CONFIG_QCOM_BUS_SCALING */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001094static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
1095{
1096 return 0;
1097}
1098
1099static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
1100{
1101 return 0;
1102}
1103
1104static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
1105{
1106 return 0;
1107}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001108static inline void msm_bus_scale_unregister_client(uint32_t cl)
1109{
1110}
1111#endif /* CONFIG_QCOM_BUS_SCALING */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001112
1113static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
1114{
1115 if (host->dev_ref_clk_ctrl_mmio &&
1116 (enable ^ host->is_dev_ref_clk_enabled)) {
1117 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
1118
1119 if (enable)
1120 temp |= host->dev_ref_clk_en_mask;
1121 else
1122 temp &= ~host->dev_ref_clk_en_mask;
1123
1124 /*
1125 * If we are here to disable this clock it might be immediately
1126 * after entering into hibern8 in which case we need to make
1127 * sure that device ref_clk is active at least 1us after the
1128 * hibern8 enter.
1129 */
1130 if (!enable)
1131 udelay(1);
1132
1133 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
1134
1135 /* ensure that ref_clk is enabled/disabled before we return */
1136 wmb();
1137
1138 /*
1139 * If we call hibern8 exit after this, we need to make sure that
1140 * device ref_clk is stable for at least 1us before the hibern8
1141 * exit command.
1142 */
1143 if (enable)
1144 udelay(1);
1145
1146 host->is_dev_ref_clk_enabled = enable;
1147 }
1148}
1149
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001150static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001151 enum ufs_notify_change_status status,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001152 struct ufs_pa_layer_attr *dev_max_params,
1153 struct ufs_pa_layer_attr *dev_req_params)
1154{
1155 u32 val;
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001156 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001157 struct phy *phy = host->generic_phy;
1158 struct ufs_qcom_dev_params ufs_qcom_cap;
1159 int ret = 0;
1160 int res = 0;
1161
1162 if (!dev_req_params) {
1163 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
1164 ret = -EINVAL;
1165 goto out;
1166 }
1167
1168 switch (status) {
1169 case PRE_CHANGE:
1170 ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
1171 ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
1172 ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
1173 ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
1174 ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
1175 ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
1176 ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
1177 ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
1178 ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
1179 ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
1180 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
1181 ufs_qcom_cap.desired_working_mode =
1182 UFS_QCOM_LIMIT_DESIRED_MODE;
1183
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001184 if (host->hw_ver.major == 0x1) {
1185 /*
1186 * HS-G3 operations may not reliably work on legacy QCOM
1187 * UFS host controller hardware even though capability
1188 * exchange during link startup phase may end up
1189 * negotiating maximum supported gear as G3.
1190 * Hence downgrade the maximum supported gear to HS-G2.
1191 */
1192 if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
1193 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
1194 if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
1195 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
1196 }
1197
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001198 ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
1199 dev_max_params,
1200 dev_req_params);
1201 if (ret) {
1202 pr_err("%s: failed to determine capabilities\n",
1203 __func__);
1204 goto out;
1205 }
1206
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001207 /* enable the device ref clock before changing to HS mode */
1208 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
1209 ufshcd_is_hs_mode(dev_req_params))
1210 ufs_qcom_dev_ref_clk_ctrl(host, true);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001211 break;
1212 case POST_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001213 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001214 dev_req_params->pwr_rx,
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001215 dev_req_params->hs_rate, false)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001216 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
1217 __func__);
1218 /*
1219 * we return error code at the end of the routine,
1220 * but continue to configure UFS_PHY_TX_LANE_ENABLE
1221 * and bus voting as usual
1222 */
1223 ret = -EINVAL;
1224 }
1225
1226 val = ~(MAX_U32 << dev_req_params->lane_tx);
1227 res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
1228 if (res) {
1229 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
1230 __func__, res);
1231 ret = res;
1232 }
1233
1234 /* cache the power mode parameters to use internally */
1235 memcpy(&host->dev_req_params,
1236 dev_req_params, sizeof(*dev_req_params));
1237 ufs_qcom_update_bus_bw_vote(host);
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001238
1239 /* disable the device ref clock if entered PWM mode */
1240 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
1241 !ufshcd_is_hs_mode(dev_req_params))
1242 ufs_qcom_dev_ref_clk_ctrl(host, false);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001243 break;
1244 default:
1245 ret = -EINVAL;
1246 break;
1247 }
1248out:
1249 return ret;
1250}
1251
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001252static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
1253{
1254 int err;
1255 u32 pa_vs_config_reg1;
1256
1257 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1258 &pa_vs_config_reg1);
1259 if (err)
1260 goto out;
1261
1262 /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
1263 err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1264 (pa_vs_config_reg1 | (1 << 12)));
1265
1266out:
1267 return err;
1268}
1269
1270static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
1271{
1272 int err = 0;
1273
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08001274 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001275 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
1276
1277 return err;
1278}
1279
Yaniv Gardiae977582015-05-17 18:55:06 +03001280static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
1281{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001282 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardiae977582015-05-17 18:55:06 +03001283
1284 if (host->hw_ver.major == 0x1)
1285 return UFSHCI_VERSION_11;
1286 else
1287 return UFSHCI_VERSION_20;
1288}
1289
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001290/**
1291 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
1292 * @hba: host controller instance
1293 *
1294 * The QCOM UFS host controller may have some non-standard behaviours (quirks)
1295 * beyond what the UFSHCI specification defines. Advertise all such
1296 * quirks to the standard UFS host controller driver so that it takes them
1297 * into account.
1298 */
1299static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1300{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001301 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001302
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001303 if (host->hw_ver.major == 0x1) {
1304 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1305 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1306 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001307
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001308 if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
Yaniv Gardi81c7e062015-05-17 18:54:58 +03001309 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001310
1311 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
Yaniv Gardi81c7e062015-05-17 18:54:58 +03001312 }
1313
Subhash Jadavanic04fcdd2016-08-05 11:20:10 -07001314 if (host->hw_ver.major == 0x2) {
Yaniv Gardiae977582015-05-17 18:55:06 +03001315 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
Yaniv Gardi2f018372015-05-17 18:55:00 +03001316
Yaniv Gardicad2e032015-03-31 17:37:14 +03001317 if (!ufs_qcom_cap_qunipro(host))
1318 /* Legacy UniPro mode still need following quirks */
Yaniv Gardi81637432015-05-17 18:55:02 +03001319 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
Yaniv Gardi2c0cc2e2015-05-17 18:55:04 +03001320 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
Yaniv Gardi81637432015-05-17 18:55:02 +03001321 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001322 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001323
1324 if (host->disable_lpm)
1325 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
Yaniv Gardicad2e032015-03-31 17:37:14 +03001326}
1327
1328static void ufs_qcom_set_caps(struct ufs_hba *hba)
1329{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001330 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001331
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001332 if (!host->disable_lpm) {
1333 hba->caps |= UFSHCD_CAP_CLK_GATING;
1334 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1335 hba->caps |= UFSHCD_CAP_CLK_SCALING;
1336 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001337 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001338
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001339 if (host->hw_ver.major >= 0x2) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001340 if (!host->disable_lpm)
1341 hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001342 host->caps = UFS_QCOM_CAP_QUNIPRO |
1343 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001344 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001345 if (host->hw_ver.major >= 0x3) {
1346 host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
1347 /*
1348 * The UFS PHY attached to the v3.0.0 controller supports entering
1349 * the deeper low power state of SVS2. This lets the controller
1350 * run at much lower clock frequencies to save power.
1351 * Assume that this and any future revisions of the controller
1352 * support this capability. Need to revisit this assumption if
1353 * any future platform with this core doesn't support the
1354 * capability, as there will be no benefit running at lower
1355 * frequencies then.
1356 */
1357 host->caps |= UFS_QCOM_CAP_SVS2;
1358 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001359}
1360
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001361/**
1362 * ufs_qcom_setup_clocks - enable or disable clocks
1363 * @hba: host controller instance
1364 * @on: If true, enable clocks else disable them.
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001365 * @is_gating_context: If true then it means this function is called from
1366 * aggressive clock gating context and we may only need to gate off important
1367 * clocks. If false then make sure to gate off all clocks.
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001368 *
1369 * Returns 0 on success, non-zero on failure.
1370 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001371static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1372 bool is_gating_context)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001373{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001374 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001375 int err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001376 int vote = 0;
1377
1378 /*
1379 * In case ufs_qcom_init() is not yet done, simply ignore.
1380 * This ufs_qcom_setup_clocks() shall be called from
1381 * ufs_qcom_init() after init is done.
1382 */
1383 if (!host)
1384 return 0;
1385
1386 if (on) {
1387 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
1388 if (err)
1389 goto out;
1390
1391 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
1392 if (err) {
1393 dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
1394 __func__, err);
1395 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1396 goto out;
1397 }
Yaniv Gardif37aabc2016-03-10 17:37:20 +02001398 /* enable the device ref clock for HS mode */
1399 if (ufshcd_is_hs_mode(&hba->pwr_info))
1400 ufs_qcom_dev_ref_clk_ctrl(host, true);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001401 vote = host->bus_vote.saved_vote;
1402 if (vote == host->bus_vote.min_bw_vote)
1403 ufs_qcom_update_bus_bw_vote(host);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001404
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001405 err = ufs_qcom_ice_resume(host);
1406 if (err)
1407 goto out;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001408 } else {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001409 err = ufs_qcom_ice_suspend(host);
1410 if (err)
1411 goto out;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001412
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001413 /* M-PHY RMMI interface clocks can be turned off */
1414 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001415 if (!ufs_qcom_is_link_active(hba)) {
1416 if (!is_gating_context)
1417 /* turn off UFS local PHY ref_clk */
1418 ufs_qcom_phy_disable_ref_clk(host->generic_phy);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001419 /* disable device ref_clk */
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001420 ufs_qcom_dev_ref_clk_ctrl(host, false);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001421 }
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001422 vote = host->bus_vote.min_bw_vote;
1423 }
1424
1425 err = ufs_qcom_set_bus_vote(host, vote);
1426 if (err)
1427 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1428 __func__, err);
1429
1430out:
1431 return err;
1432}
1433
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001434#ifdef CONFIG_SMP /* CONFIG_SMP */
1435static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
1436{
1437 int i;
1438
1439 if (cpu >= 0 && cpu < num_possible_cpus())
1440 for (i = 0; i < host->pm_qos.num_groups; i++)
1441 if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
1442 return i;
1443
1444 return host->pm_qos.default_cpu;
1445}
1446
1447static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
1448{
1449 unsigned long flags;
1450 struct ufs_qcom_host *host;
1451 struct ufs_qcom_pm_qos_cpu_group *group;
1452
1453 if (!hba || !req)
1454 return;
1455
1456 host = ufshcd_get_variant(hba);
1457 if (!host->pm_qos.groups)
1458 return;
1459
1460 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
1461
1462 spin_lock_irqsave(hba->host->host_lock, flags);
1463 if (!host->pm_qos.is_enabled)
1464 goto out;
1465
1466 group->active_reqs++;
1467 if (group->state != PM_QOS_REQ_VOTE &&
1468 group->state != PM_QOS_VOTED) {
1469 group->state = PM_QOS_REQ_VOTE;
1470 queue_work(host->pm_qos.workq, &group->vote_work);
1471 }
1472out:
1473 spin_unlock_irqrestore(hba->host->host_lock, flags);
1474}
1475
1476/* hba->host->host_lock is assumed to be held by caller */
1477static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
1478{
1479 struct ufs_qcom_pm_qos_cpu_group *group;
1480
1481 if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
1482 return;
1483
1484 group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
1485
1486 if (--group->active_reqs)
1487 return;
1488 group->state = PM_QOS_REQ_UNVOTE;
1489 queue_work(host->pm_qos.workq, &group->unvote_work);
1490}
1491
1492static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
1493 bool should_lock)
1494{
1495 unsigned long flags = 0;
1496
1497 if (!hba || !req)
1498 return;
1499
1500 if (should_lock)
1501 spin_lock_irqsave(hba->host->host_lock, flags);
1502 __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
1503 if (should_lock)
1504 spin_unlock_irqrestore(hba->host->host_lock, flags);
1505}
1506
1507static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
1508{
1509 struct ufs_qcom_pm_qos_cpu_group *group =
1510 container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
1511 struct ufs_qcom_host *host = group->host;
1512 unsigned long flags;
1513
1514 spin_lock_irqsave(host->hba->host->host_lock, flags);
1515
1516 if (!host->pm_qos.is_enabled || !group->active_reqs) {
1517 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1518 return;
1519 }
1520
1521 group->state = PM_QOS_VOTED;
1522 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1523
1524 pm_qos_update_request(&group->req, group->latency_us);
1525}
1526
1527static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
1528{
1529 struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
1530 struct ufs_qcom_pm_qos_cpu_group, unvote_work);
1531 struct ufs_qcom_host *host = group->host;
1532 unsigned long flags;
1533
1534 /*
1535 * Check if new requests were submitted in the meantime and do not
1536 * unvote if so.
1537 */
1538 spin_lock_irqsave(host->hba->host->host_lock, flags);
1539
1540 if (!host->pm_qos.is_enabled || group->active_reqs) {
1541 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1542 return;
1543 }
1544
1545 group->state = PM_QOS_UNVOTED;
1546 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1547
1548 pm_qos_update_request_timeout(&group->req,
1549 group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
1550}
1551
1552static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
1553 struct device_attribute *attr, char *buf)
1554{
1555 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1556 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1557
1558 return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
1559}
1560
1561static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
1562 struct device_attribute *attr, const char *buf, size_t count)
1563{
1564 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1565 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1566 unsigned long value;
1567 unsigned long flags;
1568 bool enable;
1569 int i;
1570
1571 if (kstrtoul(buf, 0, &value))
1572 return -EINVAL;
1573
1574 enable = !!value;
1575
1576 /*
1577 * Must take the spinlock and save irqs before changing the enabled
1578 * flag in order to keep correctness of PM QoS release.
1579 */
1580 spin_lock_irqsave(hba->host->host_lock, flags);
1581 if (enable == host->pm_qos.is_enabled) {
1582 spin_unlock_irqrestore(hba->host->host_lock, flags);
1583 return count;
1584 }
1585 host->pm_qos.is_enabled = enable;
1586 spin_unlock_irqrestore(hba->host->host_lock, flags);
1587
1588 if (!enable)
1589 for (i = 0; i < host->pm_qos.num_groups; i++) {
1590 cancel_work_sync(&host->pm_qos.groups[i].vote_work);
1591 cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
1592 spin_lock_irqsave(hba->host->host_lock, flags);
1593 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1594 host->pm_qos.groups[i].active_reqs = 0;
1595 spin_unlock_irqrestore(hba->host->host_lock, flags);
1596 pm_qos_update_request(&host->pm_qos.groups[i].req,
1597 PM_QOS_DEFAULT_VALUE);
1598 }
1599
1600 return count;
1601}
1602
1603static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
1604 struct device_attribute *attr, char *buf)
1605{
1606 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1607 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1608 int ret;
1609 int i;
1610 int offset = 0;
1611
1612 for (i = 0; i < host->pm_qos.num_groups; i++) {
1613 ret = snprintf(&buf[offset], PAGE_SIZE,
1614 "cpu group #%d(mask=0x%lx): %d\n", i,
1615 host->pm_qos.groups[i].mask.bits[0],
1616 host->pm_qos.groups[i].latency_us);
1617 if (ret > 0)
1618 offset += ret;
1619 else
1620 break;
1621 }
1622
1623 return offset;
1624}
1625
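/*
 * Illustrative usage (values are assumptions; the format is inferred from the
 * strsep() parsing below): writing "100,70" to this attribute sets the PM QoS
 * latency of cpu group 0 to 100 us and of cpu group 1 to 70 us.
 */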
1626static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
1627 struct device_attribute *attr, const char *buf, size_t count)
1628{
1629 struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1630 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1631 unsigned long value;
1632 unsigned long flags;
1633 char *strbuf;
1634 char *strbuf_copy;
1635 char *token;
1636 int i;
1637 int ret;
1638
1639 /* reserve one byte for null termination */
1640 strbuf = kmalloc(count + 1, GFP_KERNEL);
1641 if (!strbuf)
1642 return -ENOMEM;
1643 strbuf_copy = strbuf;
1644 strlcpy(strbuf, buf, count + 1);
1645
1646 for (i = 0; i < host->pm_qos.num_groups; i++) {
1647 token = strsep(&strbuf, ",");
1648 if (!token)
1649 break;
1650
1651 ret = kstrtoul(token, 0, &value);
1652 if (ret)
1653 break;
1654
1655 spin_lock_irqsave(hba->host->host_lock, flags);
1656 host->pm_qos.groups[i].latency_us = value;
1657 spin_unlock_irqrestore(hba->host->host_lock, flags);
1658 }
1659
1660 kfree(strbuf_copy);
1661 return count;
1662}
1663
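/*
 * ufs_qcom_pm_qos_init - parse the PM QoS CPU groups from DT and set up the
 * per-group vote/unvote infrastructure (requests, work items, workqueue and
 * sysfs attributes).
 *
 * Illustrative DT fragment - the masks and latencies below are assumptions,
 * only the property names are taken from the code in this function:
 *	qcom,pm-qos-cpu-groups = <0x03 0x0c>;
 *	qcom,pm-qos-cpu-group-latency-us = <200 300>;
 *	qcom,pm-qos-default-cpu = <0>;
 *
 * Returns 0 on success, -ENOTSUPP when the DT properties are absent or
 * inconsistent, or a negative error code on failure.
 */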
1664static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
1665{
1666 struct device_node *node = host->hba->dev->of_node;
1667 struct device_attribute *attr;
1668 int ret = 0;
1669 int num_groups;
1670 int num_values;
1671 char wq_name[sizeof("ufs_pm_qos_00")];
1672 int i;
1673
1674 num_groups = of_property_count_u32_elems(node,
1675 "qcom,pm-qos-cpu-groups");
1676 if (num_groups <= 0)
1677 goto no_pm_qos;
1678
1679 num_values = of_property_count_u32_elems(node,
1680 "qcom,pm-qos-cpu-group-latency-us");
1681 if (num_values <= 0)
1682 goto no_pm_qos;
1683
1684 if (num_values != num_groups || num_groups > num_possible_cpus()) {
1685 dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
1686 __func__, num_groups, num_values, num_possible_cpus());
1687 goto no_pm_qos;
1688 }
1689
1690 host->pm_qos.num_groups = num_groups;
1691 host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
1692 sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
1693 if (!host->pm_qos.groups)
1694 return -ENOMEM;
1695
1696 for (i = 0; i < host->pm_qos.num_groups; i++) {
1697 u32 mask;
1698
1699 ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
1700 i, &mask);
1701 if (ret)
1702 goto free_groups;
1703 host->pm_qos.groups[i].mask.bits[0] = mask;
1704 if (!cpumask_subset(&host->pm_qos.groups[i].mask,
1705 cpu_possible_mask)) {
1706 dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
1707 __func__, mask);
1708 goto free_groups;
1709 }
1710
1711 ret = of_property_read_u32_index(node,
1712 "qcom,pm-qos-cpu-group-latency-us", i,
1713 &host->pm_qos.groups[i].latency_us);
1714 if (ret)
1715 goto free_groups;
1716
1717 host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
1718 host->pm_qos.groups[i].req.cpus_affine =
1719 host->pm_qos.groups[i].mask;
1720 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1721 host->pm_qos.groups[i].active_reqs = 0;
1722 host->pm_qos.groups[i].host = host;
1723
1724 INIT_WORK(&host->pm_qos.groups[i].vote_work,
1725 ufs_qcom_pm_qos_vote_work);
1726 INIT_WORK(&host->pm_qos.groups[i].unvote_work,
1727 ufs_qcom_pm_qos_unvote_work);
1728 }
1729
1730 ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
1731 &host->pm_qos.default_cpu);
1732 if (ret || host->pm_qos.default_cpu >= num_possible_cpus())
1733 host->pm_qos.default_cpu = 0;
1734
1735 /*
1736 * Use a single-threaded workqueue to ensure work submitted to the queue
1737 * is performed in order. Consider the following 2 possible cases:
1738 *
1739 * 1. A new request arrives and voting work is scheduled for it. Before
1740 * the voting work is performed the request is finished and unvote
1741 * work is also scheduled.
1742 * 2. A request is finished and unvote work is scheduled. Before the
1743 * work is performed a new request arrives and voting work is also
1744 * scheduled.
1745 *
1746 * In both cases a vote work and unvote work wait to be performed.
1747 * If ordering is not guaranteed, then the end state might be the
1748 * opposite of the desired state.
1749 */
1750 snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
1751 host->hba->host->host_no);
1752 host->pm_qos.workq = create_singlethread_workqueue(wq_name);
1753 if (!host->pm_qos.workq) {
1754 dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
1755 __func__);
1756 ret = -ENOMEM;
1757 goto free_groups;
1758 }
1759
1760 /* Initialization was ok, add all PM QoS requests */
1761 for (i = 0; i < host->pm_qos.num_groups; i++)
1762 pm_qos_add_request(&host->pm_qos.groups[i].req,
1763 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
1764
1765 /* PM QoS latency sys-fs attribute */
1766 attr = &host->pm_qos.latency_attr;
1767 attr->show = ufs_qcom_pm_qos_latency_show;
1768 attr->store = ufs_qcom_pm_qos_latency_store;
1769 sysfs_attr_init(&attr->attr);
1770 attr->attr.name = "pm_qos_latency_us";
1771 attr->attr.mode = S_IRUGO | S_IWUSR;
1772 if (device_create_file(host->hba->var->dev, attr))
1773 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
1774
1775 /* PM QoS enable sys-fs attribute */
1776 attr = &host->pm_qos.enable_attr;
1777 attr->show = ufs_qcom_pm_qos_enable_show;
1778 attr->store = ufs_qcom_pm_qos_enable_store;
1779 sysfs_attr_init(&attr->attr);
1780 attr->attr.name = "pm_qos_enable";
1781 attr->attr.mode = S_IRUGO | S_IWUSR;
1782 if (device_create_file(host->hba->var->dev, attr))
1783 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
1784
1785 host->pm_qos.is_enabled = true;
1786
1787 return 0;
1788
1789free_groups:
1790 kfree(host->pm_qos.groups);
1791no_pm_qos:
1792 host->pm_qos.groups = NULL;
1793 return ret ? ret : -ENOTSUPP;
1794}
1795
1796static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
1797{
1798 int i;
1799
1800 if (!host->pm_qos.groups)
1801 return;
1802
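	/* Make sure any pending unvote work has completed before suspending */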
1803 for (i = 0; i < host->pm_qos.num_groups; i++)
1804 flush_work(&host->pm_qos.groups[i].unvote_work);
1805}
1806
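/* Tear down PM QoS: remove all group requests and destroy the workqueue */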
1807static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
1808{
1809 int i;
1810
1811 if (!host->pm_qos.groups)
1812 return;
1813
1814 for (i = 0; i < host->pm_qos.num_groups; i++)
1815 pm_qos_remove_request(&host->pm_qos.groups[i].req);
1816 destroy_workqueue(host->pm_qos.workq);
1817
1818 kfree(host->pm_qos.groups);
1819 host->pm_qos.groups = NULL;
1820}
1821#endif /* CONFIG_SMP */
1822
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001823/*
1824 * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
1825 */
1826static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
1827{
1828 struct device_node *node = host->hba->dev->of_node;
1829
1830 host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
1831 if (host->disable_lpm)
1832 pr_info("%s: will disable all LPM modes\n", __func__);
1833}
1834
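/*
 * Stash the variant host pointer in ufs_qcom_hosts[], indexed by the "ufshc"
 * DT alias id, so the host can be looked up by index later.
 */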
Subhash Jadavania889db02016-12-09 10:24:58 -08001835static void ufs_qcom_save_host_ptr(struct ufs_hba *hba)
1836{
1837 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1838 int id;
1839
1840 if (!hba->dev->of_node)
1841 return;
1842
1843 /* Extract platform data */
1844 id = of_alias_get_id(hba->dev->of_node, "ufshc");
1845 if (id <= 0)
1846 dev_err(hba->dev, "Failed to get host index %d\n", id);
1847 else if (id <= MAX_UFS_QCOM_HOSTS)
1848 ufs_qcom_hosts[id - 1] = host;
1849 else
1850 dev_err(hba->dev, "invalid host index %d\n", id);
1851}
1852
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001853/**
1854 * ufs_qcom_init - bind phy with controller
1855 * @hba: host controller instance
1856 *
1857 * Binds PHY with controller and powers up PHY enabling clocks
1858 * and regulators.
1859 *
1860 * Returns -EPROBE_DEFER if binding fails, returns negative error
1861 * on phy power up failure and returns zero on success.
1862 */
1863static int ufs_qcom_init(struct ufs_hba *hba)
1864{
1865 int err;
1866 struct device *dev = hba->dev;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001867 struct platform_device *pdev = to_platform_device(dev);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001868 struct ufs_qcom_host *host;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001869 struct resource *res;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001870
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001871 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1872 if (!host) {
1873 err = -ENOMEM;
1874 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
1875 goto out;
1876 }
1877
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001878 /* Make a two way bind between the qcom host and the hba */
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001879 host->hba = hba;
Yaniv Gardi1ce58982015-10-28 13:15:47 +02001880 ufshcd_set_variant(hba, host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001881
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001882 /*
1883 * voting/devoting device ref_clk source is time consuming hence
1884 * skip devoting it during aggressive clock gating. This clock
1885 * will still be gated off during runtime suspend.
1886 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001887 hba->no_ref_clk_gating = true;
1888
1889 err = ufs_qcom_ice_get_dev(host);
1890 if (err == -EPROBE_DEFER) {
1891 /*
1892 * UFS driver might be probed before ICE driver does.
1893 * In that case we would like to return EPROBE_DEFER code
1894 * in order to delay its probing.
1895 */
1896 dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
1897 __func__, err);
1898 goto out_host_free;
1899
1900 } else if (err == -ENODEV) {
1901 /*
1902 * ICE device is not enabled in DTS file. No need for further
1903 * initialization of ICE driver.
1904 */
1905 dev_warn(dev, "%s: ICE device is not enabled\n",
1906 __func__);
1907 } else if (err) {
1908 dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
1909 __func__, err);
1910 goto out_host_free;
1911 }
1912
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001913 host->generic_phy = devm_phy_get(dev, "ufsphy");
1914
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001915 if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
1916 /*
1917 * UFS driver might be probed before the phy driver does.
1918 * In that case we would like to return EPROBE_DEFER code.
1919 */
1920 err = -EPROBE_DEFER;
1921 dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
1922 __func__, err);
1923 goto out_host_free;
1924 } else if (IS_ERR(host->generic_phy)) {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001925 err = PTR_ERR(host->generic_phy);
1926 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
1927 goto out;
1928 }
1929
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001930 err = ufs_qcom_pm_qos_init(host);
1931 if (err)
1932 dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
1933
1934 /* restore the secure configuration */
1935 ufs_qcom_update_sec_cfg(hba, true);
1936
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001937 err = ufs_qcom_bus_register(host);
1938 if (err)
1939 goto out_host_free;
1940
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03001941 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1942 &host->hw_ver.minor, &host->hw_ver.step);
1943
Yaniv Gardif06fcc72015-10-28 13:15:51 +02001944 /*
1945 * for newer controllers, device reference clock control bit has
1946 * moved inside UFS controller register address space itself.
1947 */
1948 if (host->hw_ver.major >= 0x02) {
1949 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1950 host->dev_ref_clk_en_mask = BIT(26);
1951 } else {
1952 /* "dev_ref_clk_ctrl_mem" is optional resource */
1953 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1954 if (res) {
1955 host->dev_ref_clk_ctrl_mmio =
1956 devm_ioremap_resource(dev, res);
1957 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
1958 dev_warn(dev,
1959 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
1960 __func__,
1961 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
1962 host->dev_ref_clk_ctrl_mmio = NULL;
1963 }
1964 host->dev_ref_clk_en_mask = BIT(5);
1965 }
1966 }
1967
Yaniv Gardibfdbe8b2015-03-31 17:37:13 +03001968 /* update phy revision information before calling phy_init() */
1969 ufs_qcom_phy_save_controller_version(host->generic_phy,
1970 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
1971
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001972 phy_init(host->generic_phy);
1973 err = phy_power_on(host->generic_phy);
1974 if (err)
1975 goto out_unregister_bus;
1976
1977 err = ufs_qcom_init_lane_clks(host);
1978 if (err)
1979 goto out_disable_phy;
1980
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001981 ufs_qcom_parse_lpm(host);
1982 if (host->disable_lpm)
1983 pm_runtime_forbid(host->hba->dev);
Yaniv Gardicad2e032015-03-31 17:37:14 +03001984 ufs_qcom_set_caps(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001985 ufs_qcom_advertise_quirks(hba);
1986
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001987 ufs_qcom_setup_clocks(hba, true, false);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02001988
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001989 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
1990 ufs_qcom_get_default_testbus_cfg(host);
1991 err = ufs_qcom_testbus_config(host);
1992 if (err) {
1993 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1994 __func__, err);
1995 err = 0;
1996 }
1997
Subhash Jadavania889db02016-12-09 10:24:58 -08001998 ufs_qcom_save_host_ptr(hba);
1999
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002000 goto out;
2001
2002out_disable_phy:
2003 phy_power_off(host->generic_phy);
2004out_unregister_bus:
2005 phy_exit(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002006 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002007out_host_free:
2008 devm_kfree(dev, host);
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002009 ufshcd_set_variant(hba, NULL);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002010out:
2011 return err;
2012}
2013
2014static void ufs_qcom_exit(struct ufs_hba *hba)
2015{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002016 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002017
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002018 msm_bus_scale_unregister_client(host->bus_vote.client_handle);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002019 ufs_qcom_disable_lane_clks(host);
2020 phy_power_off(host->generic_phy);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002021 ufs_qcom_pm_qos_remove(host);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002022}
2023
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002024static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
2025 u32 clk_cycles)
2026{
2027 int err;
2028 u32 core_clk_ctrl_reg;
2029
2030 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
2031 return -EINVAL;
2032
2033 err = ufshcd_dme_get(hba,
2034 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2035 &core_clk_ctrl_reg);
2036 if (err)
2037 goto out;
2038
2039 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
2040 core_clk_ctrl_reg |= clk_cycles;
2041
2042 /* Clear CORE_CLK_DIV_EN */
2043 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2044
2045 err = ufshcd_dme_set(hba,
2046 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2047 core_clk_ctrl_reg);
2048out:
2049 return err;
2050}
2051
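/*
 * ufs_qcom_configure_lpm - put the link in hibern8, configure the PHY for
 * entering/exiting the SVS2 low power mode, then bring the link back up.
 */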
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002052static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
2053{
2054 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2055 struct phy *phy = host->generic_phy;
2056 int err = 0;
2057
2058 /* The default low power mode configuration is SVS2 */
2059 if (!ufs_qcom_cap_svs2(host))
2060 goto out;
2061
2062 /*
2063 * The link should be put in hibern8 state before
2064 * configuring the PHY to enter/exit SVS2 mode.
2065 */
2066 err = ufshcd_uic_hibern8_enter(hba);
2067 if (err)
2068 goto out;
2069
2070 err = ufs_qcom_phy_configure_lpm(phy, enable);
2071 if (err)
2072 goto out;
2073
2074 err = ufshcd_uic_hibern8_exit(hba);
2075out:
2076 return err;
2077}
2078
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002079static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
2080{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002081 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2082
2083 if (!ufs_qcom_cap_qunipro(host))
2084 return 0;
2085
2086 return ufs_qcom_configure_lpm(hba, false);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002087}
2088
2089static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
2090{
2091 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2092
2093 if (!ufs_qcom_cap_qunipro(host))
2094 return 0;
2095
2096 /* set unipro core clock cycles to 150 and clear clock divider */
2097 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
2098}
2099
2100static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
2101{
2102 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002103 u32 core_clk_ctrl_reg;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002104 int err = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002105
2106 if (!ufs_qcom_cap_qunipro(host))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002107 goto out;
2108
2109 err = ufs_qcom_configure_lpm(hba, true);
2110 if (err)
2111 goto out;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002112
2113 err = ufshcd_dme_get(hba,
2114 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2115 &core_clk_ctrl_reg);
2116
2117 /* make sure CORE_CLK_DIV_EN is cleared */
2118 if (!err &&
2119 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
2120 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2121 err = ufshcd_dme_set(hba,
2122 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2123 core_clk_ctrl_reg);
2124 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002125out:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002126 return err;
2127}
2128
2129static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
2130{
2131 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002132 int err = 0;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002133
2134 if (!ufs_qcom_cap_qunipro(host))
2135 return 0;
2136
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002137 if (ufs_qcom_cap_svs2(host))
2138 /*
2139 * For SVS2 set unipro core clock cycles to 37 and
2140 * clear clock divider
2141 */
2142 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
2143 else
2144 /*
2145 * For SVS set unipro core clock cycles to 75 and
2146 * clear clock divider
2147 */
2148 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
2149
2150 return err;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002151}
2152
2153static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
2154 bool scale_up, enum ufs_notify_change_status status)
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002155{
Yaniv Gardi1ce58982015-10-28 13:15:47 +02002156 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002157 struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002158 int err = 0;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002159
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002160 switch (status) {
2161 case PRE_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002162 if (scale_up)
2163 err = ufs_qcom_clk_scale_up_pre_change(hba);
2164 else
2165 err = ufs_qcom_clk_scale_down_pre_change(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002166 break;
2167 case POST_CHANGE:
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002168 if (scale_up)
2169 err = ufs_qcom_clk_scale_up_post_change(hba);
2170 else
2171 err = ufs_qcom_clk_scale_down_post_change(hba);
2172
2173 if (err || !dev_req_params)
2174 goto out;
2175
2176 ufs_qcom_cfg_timers(hba,
2177 dev_req_params->gear_rx,
2178 dev_req_params->pwr_rx,
2179 dev_req_params->hs_rate,
2180 false);
2181 ufs_qcom_update_bus_bw_vote(host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002182 break;
2183 default:
2184 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
2185 err = -EINVAL;
2186 break;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02002187 }
2188
2189out:
2190 return err;
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002191}
2192
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002193/*
2194 * This function should be called to restore the security configuration of UFS
2195 * register space after coming out of UFS host core power collapse.
2196 *
2197 * @hba: host controller instance
2198 * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2199 * and set "false" when secure configuration is lost.
2200 */
2201static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
2202{
2203 return 0;
2204}
2205
2206
2207static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
2208{
2209 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2210
2211 if (ufs_qcom_cap_svs2(host))
2212 return UFS_HS_G1;
2213 /* Default SVS support @ HS G2 frequencies */
2214 return UFS_HS_G2;
2215}
2216
2217void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
2218 void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
2219 char *str, void *priv))
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002220{
2221 u32 reg;
2222 struct ufs_qcom_host *host;
2223
2224 if (unlikely(!hba)) {
2225 pr_err("%s: hba is NULL\n", __func__);
2226 return;
2227 }
2228 if (unlikely(!print_fn)) {
2229 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
2230 return;
2231 }
2232
2233 host = ufshcd_get_variant(hba);
2234 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
2235 return;
2236
2237 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
2238 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
2239
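	/* set bit 17 - UTP_DBG_RAMS_EN - to allow reading the debug RAMs below */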
2240 reg = ufshcd_readl(hba, REG_UFS_CFG1);
2241 reg |= UFS_BIT(17);
2242 ufshcd_writel(hba, reg, REG_UFS_CFG1);
2243
2244 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
2245 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
2246
2247 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
2248 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
2249
2250 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
2251 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
2252
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002253 /* clear bit 17 - UTP_DBG_RAMS_EN */
2254 ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002255
2256 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
2257 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
2258
2259 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
2260 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
2261
2262 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
2263 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
2264
2265 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
2266 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
2267
2268 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
2269 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
2270
2271 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
2272 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
2273
2274 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
2275 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
2276}
2277
2278static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
2279{
2280 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
2281 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
2282 else
2283 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
2284}
2285
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002286static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
2287{
2288 /* provide a legal default configuration */
2289 host->testbus.select_major = TSTBUS_UAWM;
2290 host->testbus.select_minor = 1;
2291}
2292
2293static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
2294{
2295 if (host->testbus.select_major >= TSTBUS_MAX) {
2296 dev_err(host->hba->dev,
2297 "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
2298 __func__, host->testbus.select_major);
2299 return false;
2300 }
2301
2302 /*
2303 * Not performing a check of select_minor against each individual
2304 * select_major mapping, since there is no harm in
2305 * configuring a non-existent select_minor
2306 */
2307 if (host->testbus.select_minor > 0x1F) {
2308 dev_err(host->hba->dev,
2309 "%s: 0x%05X is not a legal testbus option\n",
2310 __func__, host->testbus.select_minor);
2311 return false;
2312 }
2313
2314 return true;
2315}
2316
2317int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
2318{
2319 int reg;
2320 int offset;
2321 u32 mask = TEST_BUS_SUB_SEL_MASK;
2322
2323 if (!host)
2324 return -EINVAL;
2325
2326 if (!ufs_qcom_testbus_cfg_is_ok(host))
2327 return -EPERM;
2328
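	/*
	 * Each major test bus selects a control register and the bit offset
	 * of its minor-select field within that register.
	 */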
2329 switch (host->testbus.select_major) {
2330 case TSTBUS_UAWM:
2331 reg = UFS_TEST_BUS_CTRL_0;
2332 offset = 24;
2333 break;
2334 case TSTBUS_UARM:
2335 reg = UFS_TEST_BUS_CTRL_0;
2336 offset = 16;
2337 break;
2338 case TSTBUS_TXUC:
2339 reg = UFS_TEST_BUS_CTRL_0;
2340 offset = 8;
2341 break;
2342 case TSTBUS_RXUC:
2343 reg = UFS_TEST_BUS_CTRL_0;
2344 offset = 0;
2345 break;
2346 case TSTBUS_DFC:
2347 reg = UFS_TEST_BUS_CTRL_1;
2348 offset = 24;
2349 break;
2350 case TSTBUS_TRLUT:
2351 reg = UFS_TEST_BUS_CTRL_1;
2352 offset = 16;
2353 break;
2354 case TSTBUS_TMRLUT:
2355 reg = UFS_TEST_BUS_CTRL_1;
2356 offset = 8;
2357 break;
2358 case TSTBUS_OCSC:
2359 reg = UFS_TEST_BUS_CTRL_1;
2360 offset = 0;
2361 break;
2362 case TSTBUS_WRAPPER:
2363 reg = UFS_TEST_BUS_CTRL_2;
2364 offset = 16;
2365 break;
2366 case TSTBUS_COMBINED:
2367 reg = UFS_TEST_BUS_CTRL_2;
2368 offset = 8;
2369 break;
2370 case TSTBUS_UTP_HCI:
2371 reg = UFS_TEST_BUS_CTRL_2;
2372 offset = 0;
2373 break;
2374 case TSTBUS_UNIPRO:
2375 reg = UFS_UNIPRO_CFG;
2376 offset = 1;
2377 break;
2378 /*
2379 * No need for a default case, since
2380 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
2381 * is legal
2382 */
2383 }
2384 mask <<= offset;
2385
2386 pm_runtime_get_sync(host->hba->dev);
2387 ufshcd_hold(host->hba, false);
2388 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
2389 (u32)host->testbus.select_major << 19,
2390 REG_UFS_CFG1);
2391 ufshcd_rmwl(host->hba, mask,
2392 (u32)host->testbus.select_minor << offset,
2393 reg);
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002394 ufs_qcom_enable_test_bus(host);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002395 ufshcd_release(host->hba, false);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002396 pm_runtime_put_sync(host->hba->dev);
2397
2398 return 0;
2399}
2400
2401static void ufs_qcom_testbus_read(struct ufs_hba *hba)
2402{
2403 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
2404}
2405
2406static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
2407{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002408 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2409
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002410 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
2411 "HCI Vendor Specific Registers ");
2412
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002413 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002414 ufs_qcom_testbus_read(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002415 ufs_qcom_ice_print_regs(host);
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002416}
Yaniv Gardieba5ed32016-03-10 17:37:21 +02002417
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002418/**
2419 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
2420 *
2421 * The variant operations configure the necessary controller and PHY
2422 * handshake during initialization.
2423 */
Yaniv Gardi47555a52015-10-28 13:15:49 +02002424static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002425 .init = ufs_qcom_init,
2426 .exit = ufs_qcom_exit,
Yaniv Gardiae977582015-05-17 18:55:06 +03002427 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002428 .clk_scale_notify = ufs_qcom_clk_scale_notify,
2429 .setup_clocks = ufs_qcom_setup_clocks,
2430 .hce_enable_notify = ufs_qcom_hce_enable_notify,
2431 .link_startup_notify = ufs_qcom_link_startup_notify,
2432 .pwr_change_notify = ufs_qcom_pwr_change_notify,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002433 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002434 .suspend = ufs_qcom_suspend,
2435 .resume = ufs_qcom_resume,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002436 .full_reset = ufs_qcom_full_reset,
2437 .update_sec_cfg = ufs_qcom_update_sec_cfg,
2438 .get_scale_down_gear = ufs_qcom_get_scale_down_gear,
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02002439 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002440#ifdef CONFIG_DEBUG_FS
2441 .add_debugfs = ufs_qcom_dbg_add_debugfs,
2442#endif
2443};
2444
2445static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
2446 .crypto_req_setup = ufs_qcom_crypto_req_setup,
2447 .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
2448 .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
2449 .crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
2450 .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
2451};
2452
2453static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
2454 .req_start = ufs_qcom_pm_qos_req_start,
2455 .req_end = ufs_qcom_pm_qos_req_end,
2456};
2457
2458static struct ufs_hba_variant ufs_hba_qcom_variant = {
2459 .name = "qcom",
2460 .vops = &ufs_hba_qcom_vops,
2461 .crypto_vops = &ufs_hba_crypto_variant_ops,
2462 .pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
Yaniv Gardi81c0fc52015-01-15 16:32:37 +02002463};
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002464
Yaniv Gardi47555a52015-10-28 13:15:49 +02002465/**
2466 * ufs_qcom_probe - probe routine of the driver
2467 * @pdev: pointer to Platform device handle
2468 *
2469 * Return zero for success and non-zero for failure
2470 */
2471static int ufs_qcom_probe(struct platform_device *pdev)
2472{
2473 int err;
2474 struct device *dev = &pdev->dev;
2475
2476 /* Perform generic probe */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002477 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
Yaniv Gardi47555a52015-10-28 13:15:49 +02002478 if (err)
2479 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
2480
2481 return err;
2482}
2483
2484/**
2485 * ufs_qcom_remove - set driver_data of the device to NULL
2486 * @pdev: pointer to platform device handle
2487 *
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002488 * Always returns 0
Yaniv Gardi47555a52015-10-28 13:15:49 +02002489 */
2490static int ufs_qcom_remove(struct platform_device *pdev)
2491{
2492 struct ufs_hba *hba = platform_get_drvdata(pdev);
2493
2494 pm_runtime_get_sync(&(pdev)->dev);
2495 ufshcd_remove(hba);
2496 return 0;
2497}
2498
2499static const struct of_device_id ufs_qcom_of_match[] = {
2500 { .compatible = "qcom,ufshc"},
2501 {},
2502};
2503
2504static const struct dev_pm_ops ufs_qcom_pm_ops = {
2505 .suspend = ufshcd_pltfrm_suspend,
2506 .resume = ufshcd_pltfrm_resume,
2507 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
2508 .runtime_resume = ufshcd_pltfrm_runtime_resume,
2509 .runtime_idle = ufshcd_pltfrm_runtime_idle,
2510};
2511
2512static struct platform_driver ufs_qcom_pltform = {
2513 .probe = ufs_qcom_probe,
2514 .remove = ufs_qcom_remove,
2515 .shutdown = ufshcd_pltfrm_shutdown,
2516 .driver = {
2517 .name = "ufshcd-qcom",
2518 .pm = &ufs_qcom_pm_ops,
2519 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2520 },
2521};
2522module_platform_driver(ufs_qcom_pltform);
2523
Yaniv Gardifb819ee2015-10-28 13:15:45 +02002524MODULE_LICENSE("GPL v2");