Hai Lia6895542015-03-31 14:36:33 -04001/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
Brian Norris964a0752015-05-20 15:59:31 -070018#include <linux/gpio/consumer.h>
Hai Lia6895542015-03-31 14:36:33 -040019#include <linux/interrupt.h>
20#include <linux/of_device.h>
21#include <linux/of_gpio.h>
22#include <linux/of_irq.h>
Hai Liab8909b2015-06-11 10:56:46 -040023#include <linux/pinctrl/consumer.h>
Hai Lia6895542015-03-31 14:36:33 -040024#include <linux/regulator/consumer.h>
25#include <linux/spinlock.h>
26#include <video/mipi_display.h>
27
28#include "dsi.h"
29#include "dsi.xml.h"
30
31#define MSM_DSI_VER_MAJOR_V2 0x02
32#define MSM_DSI_VER_MAJOR_6G 0x03
33#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
34#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
35#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
36#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
37#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
38
39#define DSI_6G_REG_SHIFT 4
40
Hai Lia6895542015-03-31 14:36:33 -040041struct dsi_config {
42 u32 major;
43 u32 minor;
44 u32 io_offset;
Hai Lia6895542015-03-31 14:36:33 -040045 struct dsi_reg_config reg_cfg;
46};
47
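/*
 * Each .regs entry below is positional; assuming the dsi_reg_entry layout
 * in dsi.h, the fields are { name, min_voltage (uV), max_voltage (uV),
 * enable_load (uA), disable_load (uA) }. A value of -1 (as for "gdsc")
 * makes dsi_regulator_init()/dsi_host_regulator_enable() skip the
 * corresponding regulator_set_voltage()/regulator_set_load() call.
 */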
48static const struct dsi_config dsi_cfgs[] = {
Hai Liec31abf2015-05-15 13:04:06 -040049 {MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
Hai Lia6895542015-03-31 14:36:33 -040050 { /* 8974 v1 */
51 .major = MSM_DSI_VER_MAJOR_6G,
52 .minor = MSM_DSI_6G_VER_MINOR_V1_0,
53 .io_offset = DSI_6G_REG_SHIFT,
Hai Lia6895542015-03-31 14:36:33 -040054 .reg_cfg = {
55 .num = 4,
56 .regs = {
57 {"gdsc", -1, -1, -1, -1},
58 {"vdd", 3000000, 3000000, 150000, 100},
59 {"vdda", 1200000, 1200000, 100000, 100},
60 {"vddio", 1800000, 1800000, 100000, 100},
61 },
62 },
63 },
64 { /* 8974 v2 */
65 .major = MSM_DSI_VER_MAJOR_6G,
66 .minor = MSM_DSI_6G_VER_MINOR_V1_1,
67 .io_offset = DSI_6G_REG_SHIFT,
Hai Lia6895542015-03-31 14:36:33 -040068 .reg_cfg = {
69 .num = 4,
70 .regs = {
71 {"gdsc", -1, -1, -1, -1},
72 {"vdd", 3000000, 3000000, 150000, 100},
73 {"vdda", 1200000, 1200000, 100000, 100},
74 {"vddio", 1800000, 1800000, 100000, 100},
75 },
76 },
77 },
78 { /* 8974 v3 */
79 .major = MSM_DSI_VER_MAJOR_6G,
80 .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
81 .io_offset = DSI_6G_REG_SHIFT,
Hai Lia6895542015-03-31 14:36:33 -040082 .reg_cfg = {
83 .num = 4,
84 .regs = {
85 {"gdsc", -1, -1, -1, -1},
86 {"vdd", 3000000, 3000000, 150000, 100},
87 {"vdda", 1200000, 1200000, 100000, 100},
88 {"vddio", 1800000, 1800000, 100000, 100},
89 },
90 },
91 },
92 { /* 8084 */
93 .major = MSM_DSI_VER_MAJOR_6G,
94 .minor = MSM_DSI_6G_VER_MINOR_V1_2,
95 .io_offset = DSI_6G_REG_SHIFT,
Hai Lia6895542015-03-31 14:36:33 -040096 .reg_cfg = {
97 .num = 4,
98 .regs = {
99 {"gdsc", -1, -1, -1, -1},
100 {"vdd", 3000000, 3000000, 150000, 100},
101 {"vdda", 1200000, 1200000, 100000, 100},
102 {"vddio", 1800000, 1800000, 100000, 100},
103 },
104 },
105 },
106 { /* 8916 */
107 .major = MSM_DSI_VER_MAJOR_6G,
108 .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
109 .io_offset = DSI_6G_REG_SHIFT,
Hai Lia6895542015-03-31 14:36:33 -0400110 .reg_cfg = {
111 .num = 4,
112 .regs = {
113 {"gdsc", -1, -1, -1, -1},
114 {"vdd", 2850000, 2850000, 100000, 100},
115 {"vdda", 1200000, 1200000, 100000, 100},
116 {"vddio", 1800000, 1800000, 100000, 100},
117 },
118 },
119 },
120};
121
122static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
123{
124 u32 ver;
125 u32 ver_6g;
126
127 if (!major || !minor)
128 return -EINVAL;
129
 130 /* Starting with DSI6G(v3), a 6G_HW_VERSION register was added at
 131 * offset 0, which shifts all other registers down by 4 bytes.
 132 */
133 ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
134 if (ver_6g == 0) {
135 ver = msm_readl(base + REG_DSI_VERSION);
136 ver = FIELD(ver, DSI_VERSION_MAJOR);
137 if (ver <= MSM_DSI_VER_MAJOR_V2) {
138 /* old versions */
139 *major = ver;
140 *minor = 0;
141 return 0;
142 } else {
143 return -EINVAL;
144 }
145 } else {
146 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
147 ver = FIELD(ver, DSI_VERSION_MAJOR);
148 if (ver == MSM_DSI_VER_MAJOR_6G) {
149 /* 6G version */
150 *major = ver;
151 *minor = ver_6g;
152 return 0;
153 } else {
154 return -EINVAL;
155 }
156 }
157}
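/*
 * For example, for the 8916 entry of dsi_cfgs[] to be selected,
 * dsi_get_version() above must return major = 0x03 (from the MAJOR field
 * of the shifted DSI_VERSION register) and minor = 0x10030001 (the raw
 * value of the 6G_HW_VERSION register), which dsi_get_config() then
 * matches against the table.
 */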
158
159#define DSI_ERR_STATE_ACK 0x0000
160#define DSI_ERR_STATE_TIMEOUT 0x0001
161#define DSI_ERR_STATE_DLN0_PHY 0x0002
162#define DSI_ERR_STATE_FIFO 0x0004
163#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
164#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
165#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
166
167#define DSI_CLK_CTRL_ENABLE_CLKS \
168 (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
169 DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
170 DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
171 DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
172
173struct msm_dsi_host {
174 struct mipi_dsi_host base;
175
176 struct platform_device *pdev;
177 struct drm_device *dev;
178
179 int id;
180
181 void __iomem *ctrl_base;
Hai Liec31abf2015-05-15 13:04:06 -0400182 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
Hai Lia6895542015-03-31 14:36:33 -0400183 struct clk *mdp_core_clk;
184 struct clk *ahb_clk;
185 struct clk *axi_clk;
186 struct clk *mmss_misc_ahb_clk;
187 struct clk *byte_clk;
188 struct clk *esc_clk;
189 struct clk *pixel_clk;
Hai Li9d32c4982015-05-15 13:04:05 -0400190 struct clk *byte_clk_src;
191 struct clk *pixel_clk_src;
192
Hai Lia6895542015-03-31 14:36:33 -0400193 u32 byte_clk_rate;
194
195 struct gpio_desc *disp_en_gpio;
196 struct gpio_desc *te_gpio;
197
198 const struct dsi_config *cfg;
199
200 struct completion dma_comp;
201 struct completion video_comp;
202 struct mutex dev_mutex;
203 struct mutex cmd_mutex;
204 struct mutex clk_mutex;
205 spinlock_t intr_lock; /* Protect interrupt ctrl register */
206
207 u32 err_work_state;
208 struct work_struct err_work;
209 struct workqueue_struct *workqueue;
210
211 struct drm_gem_object *tx_gem_obj;
212 u8 *rx_buf;
213
214 struct drm_display_mode *mode;
215
216 /* Panel info */
217 struct device_node *panel_node;
218 unsigned int channel;
219 unsigned int lanes;
220 enum mipi_dsi_pixel_format format;
221 unsigned long mode_flags;
222
223 u32 dma_cmd_ctrl_restore;
224
225 bool registered;
226 bool power_on;
227 int irq;
228};
229
230static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
231{
232 switch (fmt) {
233 case MIPI_DSI_FMT_RGB565: return 16;
234 case MIPI_DSI_FMT_RGB666_PACKED: return 18;
235 case MIPI_DSI_FMT_RGB666:
236 case MIPI_DSI_FMT_RGB888:
237 default: return 24;
238 }
239}
240
241static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
242{
243 return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
244}
245static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
246{
247 msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
248}
249
250static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
251static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
252
253static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
254{
255 const struct dsi_config *cfg;
256 struct regulator *gdsc_reg;
257 int i, ret;
258 u32 major = 0, minor = 0;
259
260 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
Fabian Frederickbdc80de2015-05-04 19:03:55 +0200261 if (IS_ERR(gdsc_reg)) {
Hai Lia6895542015-03-31 14:36:33 -0400262 pr_err("%s: cannot get gdsc\n", __func__);
263 goto fail;
264 }
265 ret = regulator_enable(gdsc_reg);
266 if (ret) {
267 pr_err("%s: unable to enable gdsc\n", __func__);
268 regulator_put(gdsc_reg);
269 goto fail;
270 }
271 ret = clk_prepare_enable(msm_host->ahb_clk);
272 if (ret) {
273 pr_err("%s: unable to enable ahb_clk\n", __func__);
274 regulator_disable(gdsc_reg);
275 regulator_put(gdsc_reg);
276 goto fail;
277 }
278
279 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
280
281 clk_disable_unprepare(msm_host->ahb_clk);
282 regulator_disable(gdsc_reg);
283 regulator_put(gdsc_reg);
284 if (ret) {
285 pr_err("%s: Invalid version\n", __func__);
286 goto fail;
287 }
288
289 for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
290 cfg = dsi_cfgs + i;
291 if ((cfg->major == major) && (cfg->minor == minor))
292 return cfg;
293 }
 294 pr_err("%s: Version %x:%x not supported\n", __func__, major, minor);
295
296fail:
297 return NULL;
298}
299
300static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
301{
302 return container_of(host, struct msm_dsi_host, base);
303}
304
305static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
306{
307 struct regulator_bulk_data *s = msm_host->supplies;
308 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
309 int num = msm_host->cfg->reg_cfg.num;
310 int i;
311
312 DBG("");
313 for (i = num - 1; i >= 0; i--)
314 if (regs[i].disable_load >= 0)
Dave Airlie2c33ce02015-04-20 11:32:26 +1000315 regulator_set_load(s[i].consumer,
316 regs[i].disable_load);
Hai Lia6895542015-03-31 14:36:33 -0400317
318 regulator_bulk_disable(num, s);
319}
320
321static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
322{
323 struct regulator_bulk_data *s = msm_host->supplies;
324 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
325 int num = msm_host->cfg->reg_cfg.num;
326 int ret, i;
327
328 DBG("");
329 for (i = 0; i < num; i++) {
330 if (regs[i].enable_load >= 0) {
Dave Airlie2c33ce02015-04-20 11:32:26 +1000331 ret = regulator_set_load(s[i].consumer,
332 regs[i].enable_load);
Hai Lia6895542015-03-31 14:36:33 -0400333 if (ret < 0) {
334 pr_err("regulator %d set op mode failed, %d\n",
335 i, ret);
336 goto fail;
337 }
338 }
339 }
340
341 ret = regulator_bulk_enable(num, s);
342 if (ret < 0) {
343 pr_err("regulator enable failed, %d\n", ret);
344 goto fail;
345 }
346
347 return 0;
348
349fail:
350 for (i--; i >= 0; i--)
Dave Airlie2c33ce02015-04-20 11:32:26 +1000351 regulator_set_load(s[i].consumer, regs[i].disable_load);
Hai Lia6895542015-03-31 14:36:33 -0400352 return ret;
353}
354
355static int dsi_regulator_init(struct msm_dsi_host *msm_host)
356{
357 struct regulator_bulk_data *s = msm_host->supplies;
358 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
359 int num = msm_host->cfg->reg_cfg.num;
360 int i, ret;
361
362 for (i = 0; i < num; i++)
363 s[i].supply = regs[i].name;
364
365 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
366 if (ret < 0) {
367 pr_err("%s: failed to init regulator, ret=%d\n",
368 __func__, ret);
369 return ret;
370 }
371
372 for (i = 0; i < num; i++) {
373 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
374 ret = regulator_set_voltage(s[i].consumer,
375 regs[i].min_voltage, regs[i].max_voltage);
376 if (ret < 0) {
377 pr_err("regulator %d set voltage failed, %d\n",
378 i, ret);
379 return ret;
380 }
381 }
382 }
383
384 return 0;
385}
386
387static int dsi_clk_init(struct msm_dsi_host *msm_host)
388{
389 struct device *dev = &msm_host->pdev->dev;
390 int ret = 0;
391
392 msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
393 if (IS_ERR(msm_host->mdp_core_clk)) {
394 ret = PTR_ERR(msm_host->mdp_core_clk);
395 pr_err("%s: Unable to get mdp core clk. ret=%d\n",
396 __func__, ret);
397 goto exit;
398 }
399
400 msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
401 if (IS_ERR(msm_host->ahb_clk)) {
402 ret = PTR_ERR(msm_host->ahb_clk);
403 pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
404 __func__, ret);
405 goto exit;
406 }
407
408 msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
409 if (IS_ERR(msm_host->axi_clk)) {
410 ret = PTR_ERR(msm_host->axi_clk);
411 pr_err("%s: Unable to get axi bus clk. ret=%d\n",
412 __func__, ret);
413 goto exit;
414 }
415
416 msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
417 if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
418 ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
419 pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
420 __func__, ret);
421 goto exit;
422 }
423
424 msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
425 if (IS_ERR(msm_host->byte_clk)) {
426 ret = PTR_ERR(msm_host->byte_clk);
427 pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
428 __func__, ret);
429 msm_host->byte_clk = NULL;
430 goto exit;
431 }
432
433 msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
434 if (IS_ERR(msm_host->pixel_clk)) {
435 ret = PTR_ERR(msm_host->pixel_clk);
436 pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
437 __func__, ret);
438 msm_host->pixel_clk = NULL;
439 goto exit;
440 }
441
442 msm_host->esc_clk = devm_clk_get(dev, "core_clk");
443 if (IS_ERR(msm_host->esc_clk)) {
444 ret = PTR_ERR(msm_host->esc_clk);
445 pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
446 __func__, ret);
447 msm_host->esc_clk = NULL;
448 goto exit;
449 }
450
Hai Li9d32c4982015-05-15 13:04:05 -0400451 msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src");
452 if (IS_ERR(msm_host->byte_clk_src)) {
453 ret = PTR_ERR(msm_host->byte_clk_src);
454 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
455 msm_host->byte_clk_src = NULL;
456 goto exit;
457 }
458
459 msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src");
460 if (IS_ERR(msm_host->pixel_clk_src)) {
461 ret = PTR_ERR(msm_host->pixel_clk_src);
462 pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
463 msm_host->pixel_clk_src = NULL;
464 goto exit;
465 }
466
Hai Lia6895542015-03-31 14:36:33 -0400467exit:
468 return ret;
469}
470
471static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
472{
473 int ret;
474
475 DBG("id=%d", msm_host->id);
476
477 ret = clk_prepare_enable(msm_host->mdp_core_clk);
478 if (ret) {
479 pr_err("%s: failed to enable mdp_core_clock, %d\n",
480 __func__, ret);
481 goto core_clk_err;
482 }
483
484 ret = clk_prepare_enable(msm_host->ahb_clk);
485 if (ret) {
486 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
487 goto ahb_clk_err;
488 }
489
490 ret = clk_prepare_enable(msm_host->axi_clk);
491 if (ret) {
 492 pr_err("%s: failed to enable axi clock, %d\n", __func__, ret);
493 goto axi_clk_err;
494 }
495
496 ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
497 if (ret) {
498 pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
499 __func__, ret);
500 goto misc_ahb_clk_err;
501 }
502
503 return 0;
504
505misc_ahb_clk_err:
506 clk_disable_unprepare(msm_host->axi_clk);
507axi_clk_err:
508 clk_disable_unprepare(msm_host->ahb_clk);
509ahb_clk_err:
510 clk_disable_unprepare(msm_host->mdp_core_clk);
511core_clk_err:
512 return ret;
513}
514
515static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
516{
517 DBG("");
518 clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
519 clk_disable_unprepare(msm_host->axi_clk);
520 clk_disable_unprepare(msm_host->ahb_clk);
521 clk_disable_unprepare(msm_host->mdp_core_clk);
522}
523
524static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
525{
526 int ret;
527
528 DBG("Set clk rates: pclk=%d, byteclk=%d",
529 msm_host->mode->clock, msm_host->byte_clk_rate);
530
531 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
532 if (ret) {
533 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
534 goto error;
535 }
536
537 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
538 if (ret) {
539 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
540 goto error;
541 }
542
543 ret = clk_prepare_enable(msm_host->esc_clk);
544 if (ret) {
545 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
546 goto error;
547 }
548
549 ret = clk_prepare_enable(msm_host->byte_clk);
550 if (ret) {
551 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
552 goto byte_clk_err;
553 }
554
555 ret = clk_prepare_enable(msm_host->pixel_clk);
556 if (ret) {
557 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
558 goto pixel_clk_err;
559 }
560
561 return 0;
562
563pixel_clk_err:
564 clk_disable_unprepare(msm_host->byte_clk);
565byte_clk_err:
566 clk_disable_unprepare(msm_host->esc_clk);
567error:
568 return ret;
569}
570
571static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
572{
573 clk_disable_unprepare(msm_host->esc_clk);
574 clk_disable_unprepare(msm_host->pixel_clk);
575 clk_disable_unprepare(msm_host->byte_clk);
576}
577
578static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
579{
580 int ret = 0;
581
582 mutex_lock(&msm_host->clk_mutex);
583 if (enable) {
584 ret = dsi_bus_clk_enable(msm_host);
585 if (ret) {
586 pr_err("%s: Can not enable bus clk, %d\n",
587 __func__, ret);
588 goto unlock_ret;
589 }
590 ret = dsi_link_clk_enable(msm_host);
591 if (ret) {
592 pr_err("%s: Can not enable link clk, %d\n",
593 __func__, ret);
594 dsi_bus_clk_disable(msm_host);
595 goto unlock_ret;
596 }
597 } else {
598 dsi_link_clk_disable(msm_host);
599 dsi_bus_clk_disable(msm_host);
600 }
601
602unlock_ret:
603 mutex_unlock(&msm_host->clk_mutex);
604 return ret;
605}
606
607static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
608{
609 struct drm_display_mode *mode = msm_host->mode;
610 u8 lanes = msm_host->lanes;
611 u32 bpp = dsi_get_bpp(msm_host->format);
612 u32 pclk_rate;
613
614 if (!mode) {
615 pr_err("%s: mode not set\n", __func__);
616 return -EINVAL;
617 }
618
619 pclk_rate = mode->clock * 1000;
620 if (lanes > 0) {
621 msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
622 } else {
623 pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
624 msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
625 }
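	/*
	 * Worked example (assumed numbers): a 1080x1920 RGB888 panel with
	 * mode->clock = 120000 kHz on 4 lanes gives
	 * byte_clk_rate = (120000000 * 24) / (8 * 4) = 90000000 Hz.
	 */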
626
627 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
628
629 return 0;
630}
631
632static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
633{
634 DBG("");
635 dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
636 /* Make sure fully reset */
637 wmb();
638 udelay(1000);
639 dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
640 udelay(100);
641}
642
643static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
644{
645 u32 intr;
646 unsigned long flags;
647
648 spin_lock_irqsave(&msm_host->intr_lock, flags);
649 intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
650
651 if (enable)
652 intr |= mask;
653 else
654 intr &= ~mask;
655
656 DBG("intr=%x enable=%d", intr, enable);
657
658 dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
659 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
660}
661
662static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
663{
664 if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
665 return BURST_MODE;
666 else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
667 return NON_BURST_SYNCH_PULSE;
668
669 return NON_BURST_SYNCH_EVENT;
670}
671
672static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
673 const enum mipi_dsi_pixel_format mipi_fmt)
674{
675 switch (mipi_fmt) {
676 case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
677 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
678 case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
679 case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
680 default: return VID_DST_FORMAT_RGB888;
681 }
682}
683
684static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
685 const enum mipi_dsi_pixel_format mipi_fmt)
686{
687 switch (mipi_fmt) {
688 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
689 case MIPI_DSI_FMT_RGB666_PACKED:
 690 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
691 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
692 default: return CMD_DST_FORMAT_RGB888;
693 }
694}
695
696static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
697 u32 clk_pre, u32 clk_post)
698{
699 u32 flags = msm_host->mode_flags;
700 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
701 u32 data = 0;
702
703 if (!enable) {
704 dsi_write(msm_host, REG_DSI_CTRL, 0);
705 return;
706 }
707
708 if (flags & MIPI_DSI_MODE_VIDEO) {
709 if (flags & MIPI_DSI_MODE_VIDEO_HSE)
710 data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
711 if (flags & MIPI_DSI_MODE_VIDEO_HFP)
712 data |= DSI_VID_CFG0_HFP_POWER_STOP;
713 if (flags & MIPI_DSI_MODE_VIDEO_HBP)
714 data |= DSI_VID_CFG0_HBP_POWER_STOP;
715 if (flags & MIPI_DSI_MODE_VIDEO_HSA)
716 data |= DSI_VID_CFG0_HSA_POWER_STOP;
717 /* Always set low power stop mode for BLLP
718 * to let command engine send packets
719 */
720 data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
721 DSI_VID_CFG0_BLLP_POWER_STOP;
722 data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
723 data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
724 data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
725 dsi_write(msm_host, REG_DSI_VID_CFG0, data);
726
727 /* Do not swap RGB colors */
728 data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
729 dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
730 } else {
731 /* Do not swap RGB colors */
732 data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
733 data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
734 dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
735
736 data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
737 DSI_CMD_CFG1_WR_MEM_CONTINUE(
738 MIPI_DCS_WRITE_MEMORY_CONTINUE);
739 /* Always insert DCS command */
740 data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
741 dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
742 }
743
744 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
745 DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
746 DSI_CMD_DMA_CTRL_LOW_POWER);
747
748 data = 0;
749 /* Always assume dedicated TE pin */
750 data |= DSI_TRIG_CTRL_TE;
751 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
752 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
753 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
754 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
755 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
756 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
757 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
758
759 data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
760 DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
761 dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
762
763 data = 0;
764 if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
765 data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
766 dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
767
768 /* allow only ack-err-status to generate interrupt */
769 dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
770
771 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
772
773 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
774
775 data = DSI_CTRL_CLK_EN;
776
777 DBG("lane number=%d", msm_host->lanes);
778 if (msm_host->lanes == 2) {
779 data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
780 /* swap lanes for 2-lane panel for better performance */
781 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
782 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
783 } else {
784 /* Take 4 lanes as default */
785 data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
786 DSI_CTRL_LANE3;
787 /* Do not swap lanes for 4-lane panel */
788 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
789 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
790 }
Archit Taneja65c5e542015-04-08 11:37:40 +0530791
792 if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
793 dsi_write(msm_host, REG_DSI_LANE_CTRL,
794 DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
795
Hai Lia6895542015-03-31 14:36:33 -0400796 data |= DSI_CTRL_ENABLE;
797
798 dsi_write(msm_host, REG_DSI_CTRL, data);
799}
800
801static void dsi_timing_setup(struct msm_dsi_host *msm_host)
802{
803 struct drm_display_mode *mode = msm_host->mode;
804 u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
805 u32 h_total = mode->htotal;
806 u32 v_total = mode->vtotal;
807 u32 hs_end = mode->hsync_end - mode->hsync_start;
808 u32 vs_end = mode->vsync_end - mode->vsync_start;
809 u32 ha_start = h_total - mode->hsync_start;
810 u32 ha_end = ha_start + mode->hdisplay;
811 u32 va_start = v_total - mode->vsync_start;
812 u32 va_end = va_start + mode->vdisplay;
813 u32 wc;
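	/*
	 * Example with assumed timings: hdisplay=1080, hsync_start=1100,
	 * hsync_end=1110, htotal=1130 gives hs_end=10 (HSA),
	 * ha_start=30 (HSA+HBP) and ha_end=1110, i.e. all positions are
	 * programmed relative to the start of HSYNC taken as 0.
	 */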
814
815 DBG("");
816
817 if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
818 dsi_write(msm_host, REG_DSI_ACTIVE_H,
819 DSI_ACTIVE_H_START(ha_start) |
820 DSI_ACTIVE_H_END(ha_end));
821 dsi_write(msm_host, REG_DSI_ACTIVE_V,
822 DSI_ACTIVE_V_START(va_start) |
823 DSI_ACTIVE_V_END(va_end));
824 dsi_write(msm_host, REG_DSI_TOTAL,
825 DSI_TOTAL_H_TOTAL(h_total - 1) |
826 DSI_TOTAL_V_TOTAL(v_total - 1));
827
828 dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
829 DSI_ACTIVE_HSYNC_START(hs_start) |
830 DSI_ACTIVE_HSYNC_END(hs_end));
831 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
832 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
833 DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
834 DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
835 } else { /* command mode */
836 /* image data and 1 byte write_memory_start cmd */
837 wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
838
839 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
840 DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
841 DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
842 msm_host->channel) |
843 DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
844 MIPI_DSI_DCS_LONG_WRITE));
845
846 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
847 DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
848 DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
849 }
850}
851
852static void dsi_sw_reset(struct msm_dsi_host *msm_host)
853{
854 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
855 wmb(); /* clocks need to be enabled before reset */
856
857 dsi_write(msm_host, REG_DSI_RESET, 1);
 858 wmb(); /* make sure the reset happens */
859 dsi_write(msm_host, REG_DSI_RESET, 0);
860}
861
862static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
863 bool video_mode, bool enable)
864{
865 u32 dsi_ctrl;
866
867 dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
868
869 if (!enable) {
870 dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
871 DSI_CTRL_CMD_MODE_EN);
872 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
873 DSI_IRQ_MASK_VIDEO_DONE, 0);
874 } else {
875 if (video_mode) {
876 dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
877 } else { /* command mode */
878 dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
879 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
880 }
881 dsi_ctrl |= DSI_CTRL_ENABLE;
882 }
883
884 dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
885}
886
887static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
888{
889 u32 data;
890
891 data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
892
893 if (mode == 0)
894 data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
895 else
896 data |= DSI_CMD_DMA_CTRL_LOW_POWER;
897
898 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
899}
900
901static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
902{
903 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
904
905 reinit_completion(&msm_host->video_comp);
906
907 wait_for_completion_timeout(&msm_host->video_comp,
908 msecs_to_jiffies(70));
909
910 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
911}
912
913static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
914{
915 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
916 return;
917
918 if (msm_host->power_on) {
919 dsi_wait4video_done(msm_host);
 920 /* delay 2 to 4 ms to skip the BLLP */
921 usleep_range(2000, 4000);
922 }
923}
924
925/* dsi_cmd */
926static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
927{
928 struct drm_device *dev = msm_host->dev;
929 int ret;
930 u32 iova;
931
932 mutex_lock(&dev->struct_mutex);
933 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
934 if (IS_ERR(msm_host->tx_gem_obj)) {
935 ret = PTR_ERR(msm_host->tx_gem_obj);
936 pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
937 msm_host->tx_gem_obj = NULL;
938 mutex_unlock(&dev->struct_mutex);
939 return ret;
940 }
941
942 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
 943 mutex_unlock(&dev->struct_mutex); /* drop struct_mutex before the error check so it is not leaked */
 944 if (ret) {
 945 pr_err("%s: failed to get iova, %d\n", __func__, ret);
 946 return ret;
 947 }
948
949 if (iova & 0x07) {
950 pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
951 return -EINVAL;
952 }
953
954 return 0;
955}
956
957static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
958{
959 struct drm_device *dev = msm_host->dev;
960
961 if (msm_host->tx_gem_obj) {
962 msm_gem_put_iova(msm_host->tx_gem_obj, 0);
963 mutex_lock(&dev->struct_mutex);
964 msm_gem_free_object(msm_host->tx_gem_obj);
965 msm_host->tx_gem_obj = NULL;
966 mutex_unlock(&dev->struct_mutex);
967 }
968}
969
970/*
971 * prepare cmd buffer to be txed
972 */
973static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
974 const struct mipi_dsi_msg *msg)
975{
976 struct mipi_dsi_packet packet;
977 int len;
978 int ret;
979 u8 *data;
980
981 ret = mipi_dsi_create_packet(&packet, msg);
982 if (ret) {
983 pr_err("%s: create packet failed, %d\n", __func__, ret);
984 return ret;
985 }
986 len = (packet.size + 3) & (~0x3);
987
988 if (len > tx_gem->size) {
989 pr_err("%s: packet size is too big\n", __func__);
990 return -EINVAL;
991 }
992
993 data = msm_gem_vaddr(tx_gem);
994
995 if (IS_ERR(data)) {
996 ret = PTR_ERR(data);
997 pr_err("%s: get vaddr failed, %d\n", __func__, ret);
998 return ret;
999 }
1000
1001 /* MSM specific command format in memory */
1002 data[0] = packet.header[1];
1003 data[1] = packet.header[2];
1004 data[2] = packet.header[0];
1005 data[3] = BIT(7); /* Last packet */
1006 if (mipi_dsi_packet_format_is_long(msg->type))
1007 data[3] |= BIT(6);
1008 if (msg->rx_buf && msg->rx_len)
1009 data[3] |= BIT(5);
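	/*
	 * For an assumed DCS long write, mipi_dsi_create_packet() fills
	 * packet.header[] as { data type, word count LSB, word count MSB },
	 * so the buffer built here starts { wc_lsb, wc_msb, data_type,
	 * flags }, followed by the payload and 0xff padding up to a 4-byte
	 * boundary.
	 */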
1010
1011 /* Long packet */
1012 if (packet.payload && packet.payload_length)
1013 memcpy(data + 4, packet.payload, packet.payload_length);
1014
1015 /* Append 0xff to the end */
1016 if (packet.size < len)
1017 memset(data + packet.size, 0xff, len - packet.size);
1018
1019 return len;
1020}
1021
1022/*
1023 * dsi_short_read1_resp: 1 parameter
1024 */
1025static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1026{
1027 u8 *data = msg->rx_buf;
1028 if (data && (msg->rx_len >= 1)) {
1029 *data = buf[1]; /* strip out dcs type */
1030 return 1;
1031 } else {
Stephane Viau981371f2015-04-30 10:39:26 -04001032 pr_err("%s: read data does not match with rx_buf len %zu\n",
Hai Lia6895542015-03-31 14:36:33 -04001033 __func__, msg->rx_len);
1034 return -EINVAL;
1035 }
1036}
1037
1038/*
1039 * dsi_short_read2_resp: 2 parameter
1040 */
1041static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1042{
1043 u8 *data = msg->rx_buf;
1044 if (data && (msg->rx_len >= 2)) {
1045 data[0] = buf[1]; /* strip out dcs type */
1046 data[1] = buf[2];
1047 return 2;
1048 } else {
Stephane Viau981371f2015-04-30 10:39:26 -04001049 pr_err("%s: read data does not match with rx_buf len %zu\n",
Hai Lia6895542015-03-31 14:36:33 -04001050 __func__, msg->rx_len);
1051 return -EINVAL;
1052 }
1053}
1054
1055static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1056{
1057 /* strip out 4 byte dcs header */
1058 if (msg->rx_buf && msg->rx_len)
1059 memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1060
1061 return msg->rx_len;
1062}
1063
1064
1065static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1066{
1067 int ret;
1068 u32 iova;
1069 bool triggered;
1070
1071 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
1072 if (ret) {
1073 pr_err("%s: failed to get iova: %d\n", __func__, ret);
1074 return ret;
1075 }
1076
1077 reinit_completion(&msm_host->dma_comp);
1078
1079 dsi_wait4video_eng_busy(msm_host);
1080
1081 triggered = msm_dsi_manager_cmd_xfer_trigger(
1082 msm_host->id, iova, len);
1083 if (triggered) {
1084 ret = wait_for_completion_timeout(&msm_host->dma_comp,
1085 msecs_to_jiffies(200));
1086 DBG("ret=%d", ret);
1087 if (ret == 0)
1088 ret = -ETIMEDOUT;
1089 else
1090 ret = len;
1091 } else
1092 ret = len;
1093
1094 return ret;
1095}
1096
1097static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1098 u8 *buf, int rx_byte, int pkt_size)
1099{
1100 u32 *lp, *temp, data;
1101 int i, j = 0, cnt;
Hai Lia6895542015-03-31 14:36:33 -04001102 u32 read_cnt;
1103 u8 reg[16];
1104 int repeated_bytes = 0;
1105 int buf_offset = buf - msm_host->rx_buf;
1106
1107 lp = (u32 *)buf;
1108 temp = (u32 *)reg;
1109 cnt = (rx_byte + 3) >> 2;
1110 if (cnt > 4)
1111 cnt = 4; /* 4 x 32 bits registers only */
1112
Hai Liec1936e2015-04-29 11:39:00 -04001113 if (rx_byte == 4)
1114 read_cnt = 4;
1115 else
1116 read_cnt = pkt_size + 6;
Hai Lia6895542015-03-31 14:36:33 -04001117
 1118 /*
 1119 * In case of multiple reads from the panel, after the first read there
 1120 * is a possibility that some payload bytes repeat in the RDBK_DATA
 1121 * registers, because every pass reads all the parameters from the
 1122 * panel again, starting from the first byte. We need to skip the
 1123 * repeated bytes and then append the new parameters to the rx buffer.
 1124 */
1125 if (read_cnt > 16) {
1126 int bytes_shifted;
1127 /* Any data more than 16 bytes will be shifted out.
1128 * The temp read buffer should already contain these bytes.
1129 * The remaining bytes in read buffer are the repeated bytes.
1130 */
1131 bytes_shifted = read_cnt - 16;
1132 repeated_bytes = buf_offset - bytes_shifted;
1133 }
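	/*
	 * Worked example (assumed values): on the second pass of a 20-byte
	 * read, pkt_size = 20 and 14 bytes were already copied
	 * (buf_offset = 14), so read_cnt = 26, bytes_shifted = 10 and
	 * repeated_bytes = 4; the first 4 bytes in the registers are
	 * duplicates and are skipped below.
	 */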
1134
1135 for (i = cnt - 1; i >= 0; i--) {
1136 data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1137 *temp++ = ntohl(data); /* to host byte order */
1138 DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1139 }
1140
1141 for (i = repeated_bytes; i < 16; i++)
1142 buf[j++] = reg[i];
1143
1144 return j;
1145}
1146
1147static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1148 const struct mipi_dsi_msg *msg)
1149{
1150 int len, ret;
1151 int bllp_len = msm_host->mode->hdisplay *
1152 dsi_get_bpp(msm_host->format) / 8;
1153
1154 len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
 1155 if (len <= 0) {
1156 pr_err("%s: failed to add cmd type = 0x%x\n",
1157 __func__, msg->type);
1158 return -EINVAL;
1159 }
1160
 1161 /* for video mode, do not send cmds longer than
 1162 * one pixel line, since they can only be transmitted
 1163 * during the BLLP.
 1164 */
1165 /* TODO: if the command is sent in LP mode, the bit rate is only
1166 * half of esc clk rate. In this case, if the video is already
1167 * actively streaming, we need to check more carefully if the
1168 * command can be fit into one BLLP.
1169 */
1170 if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1171 pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1172 __func__, len);
1173 return -EINVAL;
1174 }
1175
1176 ret = dsi_cmd_dma_tx(msm_host, len);
1177 if (ret < len) {
1178 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1179 __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1180 return -ECOMM;
1181 }
1182
1183 return len;
1184}
1185
1186static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1187{
1188 u32 data0, data1;
1189
1190 data0 = dsi_read(msm_host, REG_DSI_CTRL);
1191 data1 = data0;
1192 data1 &= ~DSI_CTRL_ENABLE;
1193 dsi_write(msm_host, REG_DSI_CTRL, data1);
 1194 /*
 1195 * the dsi controller needs to be disabled before
 1196 * the clocks are turned on
 1197 */
1198 wmb();
1199
1200 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1201 wmb(); /* make sure clocks enabled */
1202
1203 /* dsi controller can only be reset while clocks are running */
1204 dsi_write(msm_host, REG_DSI_RESET, 1);
 1205 wmb(); /* make sure the reset happens */
1206 dsi_write(msm_host, REG_DSI_RESET, 0);
1207 wmb(); /* controller out of reset */
1208 dsi_write(msm_host, REG_DSI_CTRL, data0);
1209 wmb(); /* make sure dsi controller enabled again */
1210}
1211
1212static void dsi_err_worker(struct work_struct *work)
1213{
1214 struct msm_dsi_host *msm_host =
1215 container_of(work, struct msm_dsi_host, err_work);
1216 u32 status = msm_host->err_work_state;
1217
Rob Clarkff431fa2015-05-07 15:19:02 -04001218 pr_err_ratelimited("%s: status=%x\n", __func__, status);
Hai Lia6895542015-03-31 14:36:33 -04001219 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1220 dsi_sw_reset_restore(msm_host);
1221
1222 /* It is safe to clear here because error irq is disabled. */
1223 msm_host->err_work_state = 0;
1224
1225 /* enable dsi error interrupt */
1226 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1227}
1228
1229static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1230{
1231 u32 status;
1232
1233 status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1234
1235 if (status) {
1236 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
1237 /* Writing of an extra 0 needed to clear error bits */
1238 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1239 msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1240 }
1241}
1242
1243static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1244{
1245 u32 status;
1246
1247 status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1248
1249 if (status) {
1250 dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1251 msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1252 }
1253}
1254
1255static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1256{
1257 u32 status;
1258
1259 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1260
1261 if (status) {
1262 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1263 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1264 }
1265}
1266
1267static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1268{
1269 u32 status;
1270
1271 status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1272
1273 /* fifo underflow, overflow */
1274 if (status) {
1275 dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1276 msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1277 if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1278 msm_host->err_work_state |=
1279 DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1280 }
1281}
1282
1283static void dsi_status(struct msm_dsi_host *msm_host)
1284{
1285 u32 status;
1286
1287 status = dsi_read(msm_host, REG_DSI_STATUS0);
1288
1289 if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1290 dsi_write(msm_host, REG_DSI_STATUS0, status);
1291 msm_host->err_work_state |=
1292 DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1293 }
1294}
1295
1296static void dsi_clk_status(struct msm_dsi_host *msm_host)
1297{
1298 u32 status;
1299
1300 status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1301
1302 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1303 dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1304 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1305 }
1306}
1307
1308static void dsi_error(struct msm_dsi_host *msm_host)
1309{
1310 /* disable dsi error interrupt */
1311 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1312
1313 dsi_clk_status(msm_host);
1314 dsi_fifo_status(msm_host);
1315 dsi_ack_err_status(msm_host);
1316 dsi_timeout_status(msm_host);
1317 dsi_status(msm_host);
1318 dsi_dln0_phy_err(msm_host);
1319
1320 queue_work(msm_host->workqueue, &msm_host->err_work);
1321}
1322
1323static irqreturn_t dsi_host_irq(int irq, void *ptr)
1324{
1325 struct msm_dsi_host *msm_host = ptr;
1326 u32 isr;
1327 unsigned long flags;
1328
1329 if (!msm_host->ctrl_base)
1330 return IRQ_HANDLED;
1331
1332 spin_lock_irqsave(&msm_host->intr_lock, flags);
1333 isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1334 dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1335 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1336
1337 DBG("isr=0x%x, id=%d", isr, msm_host->id);
1338
1339 if (isr & DSI_IRQ_ERROR)
1340 dsi_error(msm_host);
1341
1342 if (isr & DSI_IRQ_VIDEO_DONE)
1343 complete(&msm_host->video_comp);
1344
1345 if (isr & DSI_IRQ_CMD_DMA_DONE)
1346 complete(&msm_host->dma_comp);
1347
1348 return IRQ_HANDLED;
1349}
1350
1351static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1352 struct device *panel_device)
1353{
Uwe Kleine-König9590e692015-05-20 09:21:41 +02001354 msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1355 "disp-enable",
1356 GPIOD_OUT_LOW);
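	/*
	 * devm_gpiod_get_optional() returns NULL (not an error) when the
	 * "disp-enable" GPIO is absent, which is why later code only
	 * toggles disp_en_gpio when it is non-NULL.
	 */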
Hai Lia6895542015-03-31 14:36:33 -04001357 if (IS_ERR(msm_host->disp_en_gpio)) {
1358 DBG("cannot get disp-enable-gpios %ld",
1359 PTR_ERR(msm_host->disp_en_gpio));
Uwe Kleine-König9590e692015-05-20 09:21:41 +02001360 return PTR_ERR(msm_host->disp_en_gpio);
Hai Lia6895542015-03-31 14:36:33 -04001361 }
1362
Uwe Kleine-König9590e692015-05-20 09:21:41 +02001363 msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
Hai Lia6895542015-03-31 14:36:33 -04001364 if (IS_ERR(msm_host->te_gpio)) {
1365 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
Uwe Kleine-König9590e692015-05-20 09:21:41 +02001366 return PTR_ERR(msm_host->te_gpio);
Hai Lia6895542015-03-31 14:36:33 -04001367 }
1368
1369 return 0;
1370}
1371
1372static int dsi_host_attach(struct mipi_dsi_host *host,
1373 struct mipi_dsi_device *dsi)
1374{
1375 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1376 int ret;
1377
1378 msm_host->channel = dsi->channel;
1379 msm_host->lanes = dsi->lanes;
1380 msm_host->format = dsi->format;
1381 msm_host->mode_flags = dsi->mode_flags;
1382
1383 msm_host->panel_node = dsi->dev.of_node;
1384
1385 /* Some gpios defined in panel DT need to be controlled by host */
1386 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1387 if (ret)
1388 return ret;
1389
1390 DBG("id=%d", msm_host->id);
1391 if (msm_host->dev)
1392 drm_helper_hpd_irq_event(msm_host->dev);
1393
1394 return 0;
1395}
1396
1397static int dsi_host_detach(struct mipi_dsi_host *host,
1398 struct mipi_dsi_device *dsi)
1399{
1400 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1401
1402 msm_host->panel_node = NULL;
1403
1404 DBG("id=%d", msm_host->id);
1405 if (msm_host->dev)
1406 drm_helper_hpd_irq_event(msm_host->dev);
1407
1408 return 0;
1409}
1410
1411static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1412 const struct mipi_dsi_msg *msg)
1413{
1414 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1415 int ret;
1416
1417 if (!msg || !msm_host->power_on)
1418 return -EINVAL;
1419
1420 mutex_lock(&msm_host->cmd_mutex);
1421 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1422 mutex_unlock(&msm_host->cmd_mutex);
1423
1424 return ret;
1425}
1426
1427static struct mipi_dsi_host_ops dsi_host_ops = {
1428 .attach = dsi_host_attach,
1429 .detach = dsi_host_detach,
1430 .transfer = dsi_host_transfer,
1431};
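/*
 * A panel driver described as a child of this host in DT reaches these ops
 * through the standard mipi_dsi framework: mipi_dsi_attach() lands in
 * dsi_host_attach(), and the mipi_dsi_dcs_*() helpers end up in
 * dsi_host_transfer().
 */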
1432
1433int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1434{
1435 struct msm_dsi_host *msm_host = NULL;
1436 struct platform_device *pdev = msm_dsi->pdev;
1437 int ret;
1438
1439 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1440 if (!msm_host) {
1441 pr_err("%s: FAILED: cannot alloc dsi host\n",
1442 __func__);
1443 ret = -ENOMEM;
1444 goto fail;
1445 }
1446
1447 ret = of_property_read_u32(pdev->dev.of_node,
1448 "qcom,dsi-host-index", &msm_host->id);
1449 if (ret) {
1450 dev_err(&pdev->dev,
1451 "%s: host index not specified, ret=%d\n",
1452 __func__, ret);
1453 goto fail;
1454 }
1455 msm_host->pdev = pdev;
1456
1457 ret = dsi_clk_init(msm_host);
1458 if (ret) {
1459 pr_err("%s: unable to initialize dsi clks\n", __func__);
1460 goto fail;
1461 }
1462
1463 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1464 if (IS_ERR(msm_host->ctrl_base)) {
1465 pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1466 ret = PTR_ERR(msm_host->ctrl_base);
1467 goto fail;
1468 }
1469
1470 msm_host->cfg = dsi_get_config(msm_host);
1471 if (!msm_host->cfg) {
1472 ret = -EINVAL;
1473 pr_err("%s: get config failed\n", __func__);
1474 goto fail;
1475 }
1476
1477 ret = dsi_regulator_init(msm_host);
1478 if (ret) {
1479 pr_err("%s: regulator init failed\n", __func__);
1480 goto fail;
1481 }
1482
1483 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
 1484 if (!msm_host->rx_buf) {
 1485 pr_err("%s: alloc rx temp buf failed\n", __func__);
 1486 ret = -ENOMEM;
 1487 goto fail;
 1488 }
1488
1489 init_completion(&msm_host->dma_comp);
1490 init_completion(&msm_host->video_comp);
1491 mutex_init(&msm_host->dev_mutex);
1492 mutex_init(&msm_host->cmd_mutex);
1493 mutex_init(&msm_host->clk_mutex);
1494 spin_lock_init(&msm_host->intr_lock);
1495
1496 /* setup workqueue */
1497 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1498 INIT_WORK(&msm_host->err_work, dsi_err_worker);
1499
Hai Lia6895542015-03-31 14:36:33 -04001500 msm_dsi->host = &msm_host->base;
1501 msm_dsi->id = msm_host->id;
1502
1503 DBG("Dsi Host %d initialized", msm_host->id);
1504 return 0;
1505
1506fail:
1507 return ret;
1508}
1509
1510void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1511{
1512 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1513
1514 DBG("");
1515 dsi_tx_buf_free(msm_host);
1516 if (msm_host->workqueue) {
1517 flush_workqueue(msm_host->workqueue);
1518 destroy_workqueue(msm_host->workqueue);
1519 msm_host->workqueue = NULL;
1520 }
1521
1522 mutex_destroy(&msm_host->clk_mutex);
1523 mutex_destroy(&msm_host->cmd_mutex);
1524 mutex_destroy(&msm_host->dev_mutex);
1525}
1526
1527int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1528 struct drm_device *dev)
1529{
1530 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1531 struct platform_device *pdev = msm_host->pdev;
1532 int ret;
1533
1534 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 1535 if (!msm_host->irq) {
 1536 ret = -EINVAL; /* irq_of_parse_and_map() returns 0 on failure */
 1537 dev_err(dev->dev, "failed to get irq: %d\n", ret);
 1538 return ret;
 1539 }
1540
1541 ret = devm_request_irq(&pdev->dev, msm_host->irq,
1542 dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1543 "dsi_isr", msm_host);
1544 if (ret < 0) {
1545 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1546 msm_host->irq, ret);
1547 return ret;
1548 }
1549
1550 msm_host->dev = dev;
1551 ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1552 if (ret) {
1553 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1554 return ret;
1555 }
1556
1557 return 0;
1558}
1559
1560int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1561{
1562 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1563 struct device_node *node;
1564 int ret;
1565
1566 /* Register mipi dsi host */
1567 if (!msm_host->registered) {
1568 host->dev = &msm_host->pdev->dev;
1569 host->ops = &dsi_host_ops;
1570 ret = mipi_dsi_host_register(host);
1571 if (ret)
1572 return ret;
1573
1574 msm_host->registered = true;
1575
 1576 /* If the panel driver has not been probed after host register,
 1577 * we should defer the host's probe.
 1578 * This makes sure the panel is connected when fbcon detects the
 1579 * connector status and gets the proper display mode to
 1580 * create the framebuffer.
 1581 */
1582 if (check_defer) {
1583 node = of_get_child_by_name(msm_host->pdev->dev.of_node,
1584 "panel");
1585 if (node) {
1586 if (!of_drm_find_panel(node))
1587 return -EPROBE_DEFER;
1588 }
1589 }
1590 }
1591
1592 return 0;
1593}
1594
1595void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1596{
1597 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1598
1599 if (msm_host->registered) {
1600 mipi_dsi_host_unregister(host);
1601 host->dev = NULL;
1602 host->ops = NULL;
1603 msm_host->registered = false;
1604 }
1605}
1606
1607int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
1608 const struct mipi_dsi_msg *msg)
1609{
1610 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1611
1612 /* TODO: make sure dsi_cmd_mdp is idle.
1613 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
1614 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
1615 * How to handle the old versions? Wait for mdp cmd done?
1616 */
1617
1618 /*
1619 * mdss interrupt is generated in mdp core clock domain
1620 * mdp clock need to be enabled to receive dsi interrupt
1621 */
1622 dsi_clk_ctrl(msm_host, 1);
1623
1624 /* TODO: vote for bus bandwidth */
1625
1626 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1627 dsi_set_tx_power_mode(0, msm_host);
1628
1629 msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
1630 dsi_write(msm_host, REG_DSI_CTRL,
1631 msm_host->dma_cmd_ctrl_restore |
1632 DSI_CTRL_CMD_MODE_EN |
1633 DSI_CTRL_ENABLE);
1634 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
1635
1636 return 0;
1637}
1638
1639void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
1640 const struct mipi_dsi_msg *msg)
1641{
1642 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1643
1644 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
1645 dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
1646
1647 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1648 dsi_set_tx_power_mode(1, msm_host);
1649
1650 /* TODO: unvote for bus bandwidth */
1651
1652 dsi_clk_ctrl(msm_host, 0);
1653}
1654
1655int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
1656 const struct mipi_dsi_msg *msg)
1657{
1658 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1659
1660 return dsi_cmds2buf_tx(msm_host, msg);
1661}
1662
1663int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1664 const struct mipi_dsi_msg *msg)
1665{
1666 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1667 int data_byte, rx_byte, dlen, end;
1668 int short_response, diff, pkt_size, ret = 0;
1669 char cmd;
1670 int rlen = msg->rx_len;
1671 u8 *buf;
1672
1673 if (rlen <= 2) {
1674 short_response = 1;
1675 pkt_size = rlen;
1676 rx_byte = 4;
1677 } else {
1678 short_response = 0;
1679 data_byte = 10; /* first read */
1680 if (rlen < data_byte)
1681 pkt_size = rlen;
1682 else
1683 pkt_size = data_byte;
1684 rx_byte = data_byte + 6; /* 4 header + 2 crc */
1685 }
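	/*
	 * Example walk-through (assumed rlen = 20): the first pass sets the
	 * maximum return packet size to 10 and reads 16 bytes (4-byte
	 * header, 10 data bytes, 2-byte crc); the second pass raises the
	 * maximum to 20 and re-reads from the start, with dsi_cmd_dma_rx()
	 * skipping the bytes already copied, and rlen (now 10) <=
	 * data_byte (14) then ends the loop.
	 */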
1686
1687 buf = msm_host->rx_buf;
1688 end = 0;
1689 while (!end) {
1690 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
1691 struct mipi_dsi_msg max_pkt_size_msg = {
1692 .channel = msg->channel,
1693 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
1694 .tx_len = 2,
1695 .tx_buf = tx,
1696 };
1697
1698 DBG("rlen=%d pkt_size=%d rx_byte=%d",
1699 rlen, pkt_size, rx_byte);
1700
1701 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
1702 if (ret < 2) {
1703 pr_err("%s: Set max pkt size failed, %d\n",
1704 __func__, ret);
1705 return -EINVAL;
1706 }
1707
1708 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
1709 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1710 /* Clear the RDBK_DATA registers */
1711 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1712 DSI_RDBK_DATA_CTRL_CLR);
1713 wmb(); /* make sure the RDBK registers are cleared */
1714 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
1715 wmb(); /* release cleared status before transfer */
1716 }
1717
1718 ret = dsi_cmds2buf_tx(msm_host, msg);
1719 if (ret < msg->tx_len) {
1720 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
1721 return ret;
1722 }
1723
 1724 /*
 1725 * once the cmd_dma_done interrupt is received, the return data
 1726 * from the client is ready and already stored in the RDBK_DATA
 1727 * registers; since the rx fifo is 16 bytes, the dcs header is
 1728 * only kept on the first loop, after that it is lost as data
 1729 * shifts through the registers
 1730 */
1731 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
1732
1733 if (dlen <= 0)
1734 return 0;
1735
1736 if (short_response)
1737 break;
1738
1739 if (rlen <= data_byte) {
1740 diff = data_byte - rlen;
1741 end = 1;
1742 } else {
1743 diff = 0;
1744 rlen -= data_byte;
1745 }
1746
1747 if (!end) {
1748 dlen -= 2; /* 2 crc */
1749 dlen -= diff;
1750 buf += dlen; /* next start position */
1751 data_byte = 14; /* NOT first read */
1752 if (rlen < data_byte)
1753 pkt_size += rlen;
1754 else
1755 pkt_size += data_byte;
1756 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
1757 }
1758 }
1759
1760 /*
1761 * For single Long read, if the requested rlen < 10,
1762 * we need to shift the start position of rx
1763 * data buffer to skip the bytes which are not
1764 * updated.
1765 */
1766 if (pkt_size < 10 && !short_response)
1767 buf = msm_host->rx_buf + (10 - rlen);
1768 else
1769 buf = msm_host->rx_buf;
1770
1771 cmd = buf[0];
1772 switch (cmd) {
1773 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
 1774 pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
1775 ret = 0;
Hai Li651ad3f2015-04-29 11:38:59 -04001776 break;
Hai Lia6895542015-03-31 14:36:33 -04001777 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
1778 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
1779 ret = dsi_short_read1_resp(buf, msg);
1780 break;
1781 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
1782 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
1783 ret = dsi_short_read2_resp(buf, msg);
1784 break;
1785 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
1786 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
1787 ret = dsi_long_read_resp(buf, msg);
1788 break;
1789 default:
1790 pr_warn("%s:Invalid response cmd\n", __func__);
1791 ret = 0;
1792 }
1793
1794 return ret;
1795}
1796
1797void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
1798{
1799 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1800
1801 dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
1802 dsi_write(msm_host, REG_DSI_DMA_LEN, len);
1803 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
1804
1805 /* Make sure trigger happens */
1806 wmb();
1807}
1808
Hai Li9d32c4982015-05-15 13:04:05 -04001809int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
1810 struct msm_dsi_pll *src_pll)
1811{
1812 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1813 struct clk *byte_clk_provider, *pixel_clk_provider;
1814 int ret;
1815
1816 ret = msm_dsi_pll_get_clk_provider(src_pll,
1817 &byte_clk_provider, &pixel_clk_provider);
1818 if (ret) {
1819 pr_info("%s: can't get provider from pll, don't set parent\n",
1820 __func__);
1821 return 0;
1822 }
1823
1824 ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
1825 if (ret) {
1826 pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
1827 __func__, ret);
1828 goto exit;
1829 }
1830
1831 ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
1832 if (ret) {
1833 pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
1834 __func__, ret);
1835 goto exit;
1836 }
1837
1838exit:
1839 return ret;
1840}
1841
Hai Lia6895542015-03-31 14:36:33 -04001842int msm_dsi_host_enable(struct mipi_dsi_host *host)
1843{
1844 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1845
1846 dsi_op_mode_config(msm_host,
1847 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
1848
1849 /* TODO: clock should be turned off for command mode,
1850 * and only turned on before MDP START.
 1851 * This part of code should be enabled once the mdp driver supports it.
1852 */
1853 /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
1854 dsi_clk_ctrl(msm_host, 0); */
1855
1856 return 0;
1857}
1858
1859int msm_dsi_host_disable(struct mipi_dsi_host *host)
1860{
1861 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1862
1863 dsi_op_mode_config(msm_host,
1864 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
1865
 1866 /* Since we have disabled INTF, the video engine won't stop,
 1867 * which would keep the cmd engine blocked.
 1868 * Reset to disable the video engine so that we can send off cmds.
 1869 */
1870 dsi_sw_reset(msm_host);
1871
1872 return 0;
1873}
1874
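/*
 * Power-on ordering, as implemented below: calculate clock rates, enable
 * regulators, enable bus clocks around PHY reset/enable, enable bus + link
 * clocks via dsi_clk_ctrl(), select the default pinctrl state, program
 * timing, soft-reset the controller, write the DSI_CTRL configuration and
 * finally raise the optional panel enable GPIO.
 */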
1875int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1876{
1877 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1878 u32 clk_pre = 0, clk_post = 0;
1879 int ret = 0;
1880
1881 mutex_lock(&msm_host->dev_mutex);
1882 if (msm_host->power_on) {
1883 DBG("dsi host already on");
1884 goto unlock_ret;
1885 }
1886
1887 ret = dsi_calc_clk_rate(msm_host);
1888 if (ret) {
1889 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
1890 goto unlock_ret;
1891 }
1892
1893 ret = dsi_host_regulator_enable(msm_host);
1894 if (ret) {
1895 pr_err("%s:Failed to enable vregs.ret=%d\n",
1896 __func__, ret);
1897 goto unlock_ret;
1898 }
1899
1900 ret = dsi_bus_clk_enable(msm_host);
1901 if (ret) {
1902 pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
1903 goto fail_disable_reg;
1904 }
1905
1906 dsi_phy_sw_reset(msm_host);
1907 ret = msm_dsi_manager_phy_enable(msm_host->id,
1908 msm_host->byte_clk_rate * 8,
1909 clk_get_rate(msm_host->esc_clk),
1910 &clk_pre, &clk_post);
1911 dsi_bus_clk_disable(msm_host);
1912 if (ret) {
1913 pr_err("%s: failed to enable phy, %d\n", __func__, ret);
1914 goto fail_disable_reg;
1915 }
1916
1917 ret = dsi_clk_ctrl(msm_host, 1);
1918 if (ret) {
1919 pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
1920 goto fail_disable_reg;
1921 }
1922
Hai Liab8909b2015-06-11 10:56:46 -04001923 ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
1924 if (ret) {
1925 pr_err("%s: failed to set pinctrl default state, %d\n",
1926 __func__, ret);
1927 goto fail_disable_clk;
1928 }
1929
Hai Lia6895542015-03-31 14:36:33 -04001930 dsi_timing_setup(msm_host);
1931 dsi_sw_reset(msm_host);
1932 dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
1933
1934 if (msm_host->disp_en_gpio)
1935 gpiod_set_value(msm_host->disp_en_gpio, 1);
1936
1937 msm_host->power_on = true;
1938 mutex_unlock(&msm_host->dev_mutex);
1939
1940 return 0;
1941
Hai Liab8909b2015-06-11 10:56:46 -04001942fail_disable_clk:
1943 dsi_clk_ctrl(msm_host, 0);
Hai Lia6895542015-03-31 14:36:33 -04001944fail_disable_reg:
1945 dsi_host_regulator_disable(msm_host);
1946unlock_ret:
1947 mutex_unlock(&msm_host->dev_mutex);
1948 return ret;
1949}
1950
1951int msm_dsi_host_power_off(struct mipi_dsi_host *host)
1952{
1953 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1954
1955 mutex_lock(&msm_host->dev_mutex);
1956 if (!msm_host->power_on) {
1957 DBG("dsi host already off");
1958 goto unlock_ret;
1959 }
1960
1961 dsi_ctrl_config(msm_host, false, 0, 0);
1962
1963 if (msm_host->disp_en_gpio)
1964 gpiod_set_value(msm_host->disp_en_gpio, 0);
1965
Hai Liab8909b2015-06-11 10:56:46 -04001966 pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
1967
Hai Lia6895542015-03-31 14:36:33 -04001968 msm_dsi_manager_phy_disable(msm_host->id);
1969
1970 dsi_clk_ctrl(msm_host, 0);
1971
1972 dsi_host_regulator_disable(msm_host);
1973
1974 DBG("-");
1975
1976 msm_host->power_on = false;
1977
1978unlock_ret:
1979 mutex_unlock(&msm_host->dev_mutex);
1980 return 0;
1981}
1982
1983int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
1984 struct drm_display_mode *mode)
1985{
1986 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1987
1988 if (msm_host->mode) {
1989 drm_mode_destroy(msm_host->dev, msm_host->mode);
1990 msm_host->mode = NULL;
1991 }
1992
1993 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
1994 if (IS_ERR(msm_host->mode)) {
1995 pr_err("%s: cannot duplicate mode\n", __func__);
1996 return PTR_ERR(msm_host->mode);
1997 }
1998
1999 return 0;
2000}
2001
2002struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
2003 unsigned long *panel_flags)
2004{
2005 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2006 struct drm_panel *panel;
2007
2008 panel = of_drm_find_panel(msm_host->panel_node);
2009 if (panel_flags)
2010 *panel_flags = msm_host->mode_flags;
2011
2012 return panel;
2013}
2014