/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

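/*
 * Major selectors for the controller's internal debug test bus. Each entry
 * appears to correspond to one hardware sub-module whose signals can be
 * routed onto the test bus (see ufs_qcom_testbus_config() below).
 */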
enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

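/*
 * ufs_qcom_dump_regs - hex-dump a range of host controller registers
 * @hba: host controller instance
 * @offset: byte offset of the first register within the MMIO space
 * @len: number of 32-bit registers to dump
 * @prefix: string printed in front of the dump
 */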
static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
		char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, (void __force *)hba->mmio_base + offset,
			len * 4, false);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d\n",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
			host->tx_l1_sync_clk);
		if (err)
			goto disable_rx_l1;
	}

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk);
	}
out:
	return err;
}

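/*
 * ufs_qcom_link_startup_post_change - propagate the TX lane configuration
 *
 * Reads the PA_CONNECTEDTXDATALANES attribute negotiated during link
 * startup and hands it to the QCOM UFS PHY driver so that only the
 * connected TX lanes are kept enabled.
 */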
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}

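/*
 * ufs_qcom_check_hibern8 - wait for the M-PHY TX FSM to reach HIBERN8
 *
 * Polls the vendor specific MPHY_TX_FSM_STATE attribute for up to
 * HBRN8_POLL_TOUT_MS. Returns 0 when the state is TX_FSM_HIBERN8, a
 * negative error code if the attribute could not be read, or the
 * unexpected FSM state value otherwise.
 */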
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time while polling,
	 * so check the state once more after the timeout.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

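/*
 * ufs_qcom_select_unipro_mode - select QUniPro or legacy UniPro mode
 *
 * Sets the QUNIPRO_SEL bit in REG_UFS_CFG1 when the host is QUniPro
 * capable and clears it otherwise; the barrier makes sure the write is
 * observed before the caller proceeds.
 */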
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * After reset de-assertion, the PHY needs its reference clocks,
	 * voltage and current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev,
			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to UTP sub-modules that are not involved
 * in a specific operation. The UTP controller CGCs are disabled by default,
 * and this function enables them (after every UFS link startup) to save
 * some power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);

		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * ufs_qcom_cfg_timers - configure vendor specific timer registers
 * @hba: host controller instance
 * @gear: gear to be configured
 * @hs: FAST/FASTAUTO, SLOW/SLOWAUTO or UNCHANGED mode
 * @rate: HS rate A or B (ignored in SLOW/PWM modes)
 * @update_link_startup_timer: if true, also program the link startup timer
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* the two fields of this register shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and possibly the host) have issues if
		 * LCC is enabled. So we are setting PA_Local_TX_LCC_Enable
		 * to 0 before link startup, which will make sure that both
		 * host and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_dme_set(hba,
					UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
					0);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}

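/*
 * ufs_qcom_suspend - vendor specific suspend handling
 *
 * With the link off, the lane clocks and PHY are powered down and PHY
 * reset is asserted; with the link merely inactive, only the lane clocks
 * and PHY power are dropped; an active link keeps everything running.
 */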
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);
	}

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	hba->is_sys_suspended = false;

out:
	return err;
}

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

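/*
 * ufs_qcom_get_pwr_dev_param - negotiate the power mode parameters
 * @qcom_param: vendor preferred limits
 * @dev_max: maximum capabilities reported by the device
 * @agreed_pwr: negotiated result
 *
 * Chooses the working mode (HS vs. PWM), lane count and gear as the
 * minimum of device capability and vendor preference; fails with
 * -ENOTSUPP only when the vendor asks for HS but the device cannot do HS.
 */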
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if the device capabilities and the vendor pre-defined preferences
	 * are both HS or both PWM, then set the minimum gear to be the
	 * chosen working gear.
	 * if one is PWM and one is HS, then the one that is PWM gets to
	 * decide the gear, as it is the one that also decided previously
	 * what pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

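/*
 * ufs_qcom_dev_ref_clk_ctrl - gate/ungate the device reference clock
 *
 * Flips the ref_clk enable bit only when the requested state differs from
 * the cached one; the surrounding 1us delays keep the device ref_clk
 * stable around hibern8 enter/exit, as explained in the comments below.
 */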
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock, it might be
		 * immediately after entering into hibern8, in which case
		 * we need to make sure that the device ref_clk is active
		 * for at least 1us after the hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure
		 * that the device ref_clk is stable for at least 1us before
		 * the hibern8 exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

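/*
 * ufs_qcom_get_ufs_hci_version - report the UFSHCI version to the core
 *
 * Presumably consulted by the core driver when
 * UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION is advertised (see
 * ufs_qcom_advertise_quirks() below): version 0x1 controllers are treated
 * as UFSHCI 1.1 and anything newer as UFSHCI 2.0.
 */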
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * A QCOM UFS host controller might have some non standard behaviours
 * (quirks) compared to what the UFSHCI specification requires. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major >= 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enable/disable the clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, otherwise disable them
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * ufs_qcom_setup_clocks() will be called again from
	 * ufs_qcom_init() once init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);

	} else {

		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba))
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);

		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}

#define	ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/*
	 * voting/devoting device ref_clk source is time consuming hence
	 * skip devoting it during aggressive clock gating. This clock
	 * will still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}

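/*
 * ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div - set the core clock ratio
 * @hba: host controller instance
 * @clk_cycles: number of unipro core clock cycles per 1us
 *
 * Read-modify-writes the vendor specific DME_VS_CORE_CLK_CTRL attribute:
 * updates the MAX_CORE_CLK_1US_CYCLES field and clears CORE_CLK_DIV_EN.
 */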
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufs_qcom_update_bus_bw_vote(host);
	}

out:
	return err;
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UAWM;
	host->testbus.select_minor = 1;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	/*
	 * Not performing check for each individual select_major
	 * mappings of select_minor, since there is no harm in
	 * configuring a non-existent select_minor
	 */
	if (host->testbus.select_minor > 0x1F) {
		dev_err(host->hba->dev,
			"%s: 0x%05X is not a legal testbus option\n",
			__func__, host->testbus.select_minor);
		return false;
	}

	return true;
}

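/*
 * ufs_qcom_testbus_config - route the selected sub-module to the test bus
 *
 * Validates testbus.select_major/select_minor, then programs the major
 * selector into UFS_CFG1[TEST_BUS_SEL] and the minor selector into the
 * sub-module specific field of the matching UFS_TEST_BUS_CTRL_n (or
 * UFS_UNIPRO_CFG) register, holding the host runtime-resumed meanwhile.
 */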
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 1;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;

	pm_runtime_get_sync(host->hba->dev);
	ufshcd_hold(host->hba, false);
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufshcd_release(host->hba);
	pm_runtime_put_sync(host->hba->dev);

	return 0;
}

static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
			   "HCI Vendor Specific Registers ");

	ufs_qcom_testbus_read(hba);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	 = ufshcd_pltfrm_runtime_resume,
	.runtime_idle	 = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");