/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME		32
#define VDDA_PHY_MIN_UV		1000000
#define VDDA_PHY_MAX_UV		1000000
#define VDDA_PLL_MIN_UV		1200000
#define VDDA_PLL_MAX_UV		1800000
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000

static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
				    const char *, bool);
static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
				  const char *);
static int ufs_qcom_phy_base_init(struct platform_device *pdev,
				  struct ufs_qcom_phy *phy_common);

void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
			    struct ufs_qcom_phy_calibration *tbl,
			    int tbl_size)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		writel_relaxed(tbl[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl[i].reg_offset);
}
EXPORT_SYMBOL(ufs_qcom_phy_write_tbl);

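/*
 * Program the PHY calibration tables. tbl_A (rate A settings) is always
 * required; when is_rate_B is set, tbl_B is applied on top of it.
 * Returns 0 on success or -EINVAL if a required table is missing.
 */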
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
			   struct ufs_qcom_phy_calibration *tbl_A,
			   int tbl_size_A,
			   struct ufs_qcom_phy_calibration *tbl_B,
			   int tbl_size_B, bool is_rate_B)
{
	int ret = 0;

	if (!tbl_A) {
		dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
		ret = -EINVAL;
		goto out;
	}

	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_A, tbl_size_A);

	/*
	 * In case we want to work in rate B, we need to override the
	 * registers that were configured from the rate A table with the
	 * registers of the rate B table.
	 */
	if (is_rate_B) {
		if (!tbl_B) {
			dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
				__func__);
			ret = -EINVAL;
			goto out;
		}

		ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_B, tbl_size_B);
	}

	/* flush buffered writes */
	mb();

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);

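/*
 * Common probe helper for the device-specific UFS PHY drivers: maps the
 * "phy_mem" resource, registers a phy provider and creates the generic
 * phy object. On any failure it returns NULL (the error is logged here).
 *
 * A device-specific driver is expected to call it roughly like this
 * (driver and ops names below are illustrative, not part of this file):
 *
 *	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
 *				&ufs_qcom_phy_xxx_phy_ops, &phy_xxx_ops);
 *	if (!generic_phy)
 *		return -EIO;
 *	phy_set_drvdata(generic_phy, phy);
 *
 * The phy_set_drvdata() call is what get_ufs_qcom_phy() below relies on.
 */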
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
				       struct ufs_qcom_phy *common_cfg,
				       const struct phy_ops *ufs_qcom_phy_gen_ops,
				       struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);

/*
 * This assumes the phy structure stored in generic_phy's drvdata is of
 * type struct ufs_qcom_phy. For this to work, it is crucial that the
 * device-specific phy structure keeps "struct ufs_qcom_phy common_cfg"
 * as its first member.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);

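/*
 * Map the PHY register space. The "phy_mem" named resource must be
 * provided by the platform (typically via device tree); the mapping is
 * device-managed, so there is no matching unmap on the remove path.
 */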
static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	if (!res) {
		dev_err(dev, "%s: phy_mem resource not found\n", __func__);
		err = -ENOMEM;
		goto out;
	}

	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->mmio)) {
		err = PTR_ERR((void const *)phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
	}
out:
	return err;
}

static int __ufs_qcom_phy_clk_get(struct phy *phy,
		const char *name, struct clk **clk_out, bool err_print)
{
	struct clk *clk;
	int err = 0;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		if (err_print)
			dev_err(dev, "failed to get %s err %d", name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static
int ufs_qcom_phy_clk_get(struct phy *phy,
			 const char *name, struct clk **clk_out)
{
	return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
}

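/*
 * Look up all clocks the PHY may need. Only "ref_clk_src" and "ref_clk"
 * are mandatory; the interface clocks and "ref_clk_parent"/"ref_aux_clk"
 * are absent on some PHY generations, so their lookup failures are
 * tolerated.
 */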
int
ufs_qcom_phy_init_clks(struct phy *generic_phy,
		       struct ufs_qcom_phy *phy_common)
{
	int err;
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
				   &phy_common->tx_iface_clk);
	/*
	 * tx_iface_clk does not exist in newer versions of ufs-phy HW,
	 * so don't return an error if it is not found.
	 */
	if (err)
		dev_dbg(phy->dev, "%s: failed to get tx_iface_clk\n",
			__func__);

	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
				   &phy_common->rx_iface_clk);
	/*
	 * rx_iface_clk does not exist in newer versions of ufs-phy HW,
	 * so don't return an error if it is not found.
	 */
	if (err)
		dev_dbg(phy->dev, "%s: failed to get rx_iface_clk\n",
			__func__);

	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

	/*
	 * "ref_clk_parent" is optional, hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
			       &phy_common->ref_clk_parent, false);

	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
				   &phy_common->ref_clk);
	if (err)
		goto out;

	/*
	 * "ref_aux_clk" is optional and only supported by certain
	 * phy versions, don't abort init if it's not found.
	 */
	__ufs_qcom_phy_clk_get(generic_phy, "ref_aux_clk",
			       &phy_common->ref_aux_clk, false);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);

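/*
 * Acquire the PHY supplies. "vdda-pll" and "vdda-phy" are required; the
 * vdda-phy voltage range is then pinned to whatever voltage the regulator
 * is currently set to. "vddp-ref-clk" is optional and its absence is
 * ignored.
 */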
int
ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
			      struct ufs_qcom_phy *phy_common)
{
	int err;
	int vdda_phy_uV;

	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
				     "vdda-pll");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
				     "vdda-phy");
	if (err)
		goto out;

	vdda_phy_uV = regulator_get_voltage(phy_common->vdda_phy.reg);
	phy_common->vdda_phy.max_uV = vdda_phy_uV;
	phy_common->vdda_phy.min_uV = vdda_phy_uV;

	/* vddp-ref-clk-* properties are optional */
	__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
				 "vddp-ref-clk", true);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);

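/*
 * Get a regulator and read its DT configuration: "<name>-max-microamp"
 * (required whenever the regulator supports voltage control) and the
 * optional "<name>-always-on" flag. Voltage limits for the known supplies
 * come from the hard-coded defines at the top of this file.
 */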
static int __ufs_qcom_phy_init_vreg(struct phy *phy,
		struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
	int err = 0;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	char prop_name[MAX_PROP_NAME];

	vreg->name = kstrdup(name, GFP_KERNEL);
	if (!vreg->name) {
		err = -ENOMEM;
		goto out;
	}

	vreg->reg = devm_regulator_get(dev, name);
	if (IS_ERR(vreg->reg)) {
		err = PTR_ERR(vreg->reg);
		vreg->reg = NULL;
		if (!optional)
			dev_err(dev, "failed to get %s, %d\n", name, err);
		goto out;
	}

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
		err = of_property_read_u32(dev->of_node,
					   prop_name, &vreg->max_uA);
		if (err && err != -EINVAL) {
			dev_err(dev, "%s: failed to read %s\n",
				__func__, prop_name);
			goto out;
		} else if (err == -EINVAL || !vreg->max_uA) {
			if (regulator_count_voltages(vreg->reg) > 0) {
				dev_err(dev, "%s: %s is mandatory\n",
					__func__, prop_name);
				err = -EINVAL;
				goto out;
			}
			err = 0;
		}
		snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
		vreg->is_always_on = of_property_read_bool(dev->of_node,
							   prop_name);
	}

	if (!strcmp(name, "vdda-pll")) {
		vreg->max_uV = VDDA_PLL_MAX_UV;
		vreg->min_uV = VDDA_PLL_MIN_UV;
	} else if (!strcmp(name, "vdda-phy")) {
		vreg->max_uV = VDDA_PHY_MAX_UV;
		vreg->min_uV = VDDA_PHY_MIN_UV;
	} else if (!strcmp(name, "vddp-ref-clk")) {
		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	}

out:
	if (err)
		kfree(vreg->name);
	return err;
}

static int ufs_qcom_phy_init_vreg(struct phy *phy,
		struct ufs_qcom_phy_vreg *vreg, const char *name)
{
	return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
}

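/*
 * Apply the "on" or "off" regulator configuration: "on" programs the
 * supply's voltage range and max_uA load, "off" relaxes the minimum
 * voltage and the load back to 0. Enabling/disabling the regulator
 * itself is left to the callers.
 */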
static
int ufs_qcom_phy_cfg_vreg(struct phy *phy,
			  struct ufs_qcom_phy_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	const char *name;
	int min_uV;
	int uA_load;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	BUG_ON(!vreg);

	reg = vreg->reg;
	name = vreg->name;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, name, ret);
			goto out;
		}
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(reg, uA_load);
		if (ret >= 0) {
			/*
			 * regulator_set_load() returns new regulator
			 * mode upon success.
			 */
			ret = 0;
		} else {
			dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
				__func__, name, uA_load, ret);
			goto out;
		}
	}
out:
	return ret;
}

static
int ufs_qcom_phy_enable_vreg(struct phy *phy,
			     struct ufs_qcom_phy_vreg *vreg)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
	if (ret) {
		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	ret = regulator_enable(vreg->reg);
	if (ret) {
		dev_err(dev, "%s: enable failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	vreg->enabled = true;
out:
	return ret;
}

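/*
 * Enable the reference clock chain in source-to-phy order:
 * ref_clk_src -> (ref_clk_parent) -> ref_clk -> (ref_aux_clk), unwinding
 * in reverse on failure. The function is idempotent: calling it again
 * while the clocks are already on is a no-op.
 */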
int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
{
	int ret = 0;
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * The reference clock is propagated in a daisy-chained manner from
	 * source to phy, so ungate them at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is an optional clock, hence make sure that the
	 * clk reference is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
				__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
			__func__, ret);
		goto out_disable_parent;
	}

	/*
	 * "ref_aux_clk" is an optional clock and only supported by certain
	 * phy versions, hence make sure that the clk reference is available
	 * before trying to enable the clock.
	 */
	if (phy->ref_aux_clk) {
		ret = clk_prepare_enable(phy->ref_aux_clk);
		if (ret) {
			dev_err(phy->dev, "%s: ref_aux_clk enable failed %d\n",
				__func__, ret);
			goto out_disable_ref;
		}
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_ref:
	if (phy->ref_clk)
		clk_disable_unprepare(phy->ref_clk);
out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);

static
int ufs_qcom_phy_disable_vreg(struct phy *phy,
			      struct ufs_qcom_phy_vreg *vreg)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->is_always_on)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufs_qcom_phy_cfg_vreg(phy, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}

void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_ref_clk_enabled) {
		/*
		 * "ref_aux_clk" is an optional clock and only supported by
		 * certain phy versions, hence make sure that the clk
		 * reference is available before trying to disable the clock.
		 */
		if (phy->ref_aux_clk)
			clk_disable_unprepare(phy->ref_aux_clk);
		clk_disable_unprepare(phy->ref_clk);
		/*
		 * "ref_clk_parent" is an optional clock, hence make sure
		 * that the clk reference is available before trying to
		 * disable the clock.
		 */
		if (phy->ref_clk_parent)
			clk_disable_unprepare(phy->ref_clk_parent);
		clk_disable_unprepare(phy->ref_clk_src);
		phy->is_ref_clk_enabled = false;
	}
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);

/* Turn ON M-PHY RMMI interface clocks */
int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	if (!phy->tx_iface_clk)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
			__func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
			__func__, ret);
		goto out;
	}
	phy->is_iface_clk_enabled = true;

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);

/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (!phy->tx_iface_clk)
		return;

	if (phy->is_iface_clk_enabled) {
		clk_disable_unprepare(phy->tx_iface_clk);
		clk_disable_unprepare(phy->rx_iface_clk);
		phy->is_iface_clk_enabled = false;
	}
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);

int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
		dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable)
		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
							       tx_lanes);

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);

int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg)
		ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg(ufs_qcom_phy, ctrl);

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_ctrl_rx_linecfg);

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
					  u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);

int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
		dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ret = ufs_qcom_phy->phy_spec_ops->
				calibrate_phy(ufs_qcom_phy, is_rate_B);
		if (ret)
			dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
				__func__, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);

const char *ufs_qcom_phy_name(struct phy *phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);

	return ufs_qcom_phy->name;
}
EXPORT_SYMBOL(ufs_qcom_phy_name);

int ufs_qcom_phy_remove(struct phy *generic_phy,
			struct ufs_qcom_phy *ufs_qcom_phy)
{
	phy_power_off(generic_phy);

	kfree(ufs_qcom_phy->vdda_pll.name);
	kfree(ufs_qcom_phy->vdda_phy.name);

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);

int ufs_qcom_phy_exit(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->is_powered_on)
		phy_power_off(generic_phy);

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);

int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
		dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
			__func__);
		return -ENOTSUPP;
	}

	return ufs_qcom_phy->phy_spec_ops->
			is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);

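/*
 * Power-up sequence: the vdda-phy supply first, then the PHY's own
 * power_control op, then vdda-pll (which also feeds the ref clock LDOs),
 * the reference clocks, and finally the optional device ref_clk pad rail.
 * Each failure unwinds whatever was enabled before it.
 */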
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_ref_clk(generic_phy);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(generic_phy,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(generic_phy);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

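/*
 * Power-down: gate the PHY via its power_control op first, then release
 * the pad rail, reference clocks and supplies acquired in
 * ufs_qcom_phy_power_on().
 */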
int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(generic_phy,
					  &phy_common->vddp_ref_clk);
	ufs_qcom_phy_disable_ref_clk(generic_phy);

	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
	phy_common->is_powered_on = false;

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);

int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (ufs_qcom_phy->phy_spec_ops->configure_lpm) {
		ret = ufs_qcom_phy->phy_spec_ops->
				configure_lpm(ufs_qcom_phy, enable);
		if (ret)
			dev_err(ufs_qcom_phy->dev,
				"%s: configure_lpm(%s) failed %d\n",
				__func__, enable ? "enable" : "disable", ret);
	}

	return ret;
}
EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);