/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
                vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
                vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
                __REG_SET_##mode(x, base + reg.offset, \
                                 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
                __REG_SET_##mode(x, base + reg.offset, \
                                 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
        REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
        REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
        REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
        REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
        vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
        REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
        do { \
                int i, reg = 0, mask = 0; \
                for (i = 0; i < vop->data->intr->nintrs; i++) { \
                        if (vop->data->intr->intrs[i] & type) { \
                                reg |= (v) << i; \
                                mask |= 1 << i; \
                        } \
                } \
                VOP_INTR_SET(vop, name, mask, reg); \
        } while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
                vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
        vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
        vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

enum vop_pending {
        VOP_PENDING_FB_UNREF,
};

struct vop_win {
        struct drm_plane base;
        const struct vop_win_data *data;
        struct vop *vop;
};

struct vop {
        struct drm_crtc crtc;
        struct device *dev;
        struct drm_device *drm_dev;
        bool is_enabled;

        /* protects the vsync work */
        struct mutex vsync_mutex;
        bool vsync_work_pending;
        struct completion dsp_hold_completion;

        /* protected by dev->event_lock */
        struct drm_pending_vblank_event *event;

        struct drm_flip_work fb_unref_work;
        unsigned long pending;

        struct completion line_flag_completion;

        const struct vop_data *data;

        uint32_t *regsbak;
        void __iomem *regs;

        /* length of the mapped vop register region */
        uint32_t len;

        /* only one process at a time may configure the registers */
        spinlock_t reg_lock;
        /* protects the vop irq registers */
        spinlock_t irq_lock;

        unsigned int irq;

        /* vop AHB clk */
        struct clk *hclk;
        /* vop dclk */
        struct clk *dclk;
        /* vop shared memory clock */
        struct clk *aclk;

        /* vop dclk reset */
        struct reset_control *dclk_rst;

        struct vop_win win[];
};

static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
        writel(v, vop->regs + offset);
        vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
        return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
                                    const struct vop_reg *reg)
{
        return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

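/*
 * Masked register write: either build a hardware write-mask value (data in
 * the low 16 bits, mask in the high 16 bits) or read-modify-write through
 * the cached shadow copy in regsbak, then issue a normal or relaxed MMIO
 * write.
 */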
static inline void vop_mask_write(struct vop *vop, uint32_t offset,
                                  uint32_t mask, uint32_t shift, uint32_t v,
                                  bool write_mask, bool relaxed)
{
        if (!mask)
                return;

        if (write_mask) {
                v = ((v << shift) & 0xffff) | (mask << (shift + 16));
        } else {
                uint32_t cached_val = vop->regsbak[offset >> 2];

                v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
                vop->regsbak[offset >> 2] = v;
        }

        if (relaxed)
                writel_relaxed(v, vop->regs + offset);
        else
                writel(v, vop->regs + offset);
}

static inline uint32_t vop_get_intr_type(struct vop *vop,
                                         const struct vop_reg *reg, int type)
{
        uint32_t i, ret = 0;
        uint32_t regs = vop_read_reg(vop, 0, reg);

        for (i = 0; i < vop->data->intr->nintrs; i++) {
                if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
                        ret |= vop->data->intr->intrs[i];
        }

        return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
        VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
        switch (format) {
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_BGR888:
        case DRM_FORMAT_BGR565:
                return true;
        default:
                return false;
        }
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                return VOP_FMT_ARGB8888;
        case DRM_FORMAT_RGB888:
        case DRM_FORMAT_BGR888:
                return VOP_FMT_RGB888;
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_BGR565:
                return VOP_FMT_RGB565;
        case DRM_FORMAT_NV12:
                return VOP_FMT_YUV420SP;
        case DRM_FORMAT_NV16:
                return VOP_FMT_YUV422SP;
        case DRM_FORMAT_NV24:
                return VOP_FMT_YUV444SP;
        default:
                DRM_ERROR("unsupported format[%08x]\n", format);
                return -EINVAL;
        }
}

static bool is_yuv_support(uint32_t format)
{
        switch (format) {
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV16:
        case DRM_FORMAT_NV24:
                return true;
        default:
                return false;
        }
}

static bool is_alpha_support(uint32_t format)
{
        switch (format) {
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_ABGR8888:
                return true;
        default:
                return false;
        }
}

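/*
 * Compute one scaling coefficient in the hardware's fixed-point format
 * (1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT represents 1.0), choosing a bicubic
 * or bilinear factor and, for vertical down-scaling, an optional
 * line-skip count.
 */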
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
                                  uint32_t dst, bool is_horizontal,
                                  int vsu_mode, int *vskiplines)
{
        uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

        if (is_horizontal) {
                if (mode == SCALE_UP)
                        val = GET_SCL_FT_BIC(src, dst);
                else if (mode == SCALE_DOWN)
                        val = GET_SCL_FT_BILI_DN(src, dst);
        } else {
                if (mode == SCALE_UP) {
                        if (vsu_mode == SCALE_UP_BIL)
                                val = GET_SCL_FT_BILI_UP(src, dst);
                        else
                                val = GET_SCL_FT_BIC(src, dst);
                } else if (mode == SCALE_DOWN) {
                        if (vskiplines) {
                                *vskiplines = scl_get_vskiplines(src, dst);
                                val = scl_get_bili_dn_vskip(src, dst,
                                                            *vskiplines);
                        } else {
                                val = GET_SCL_FT_BILI_DN(src, dst);
                        }
                }
        }

        return val;
}

static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
                                uint32_t src_w, uint32_t src_h, uint32_t dst_w,
                                uint32_t dst_h, uint32_t pixel_format)
{
        uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
        uint16_t cbcr_hor_scl_mode = SCALE_NONE;
        uint16_t cbcr_ver_scl_mode = SCALE_NONE;
        int hsub = drm_format_horz_chroma_subsampling(pixel_format);
        int vsub = drm_format_vert_chroma_subsampling(pixel_format);
        bool is_yuv = is_yuv_support(pixel_format);
        uint16_t cbcr_src_w = src_w / hsub;
        uint16_t cbcr_src_h = src_h / vsub;
        uint16_t vsu_mode;
        uint16_t lb_mode;
        uint32_t val;
        int vskiplines = 0;

        if (dst_w > 3840) {
                DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
                return;
        }

        if (!win->phy->scl->ext) {
                VOP_SCL_SET(vop, win, scale_yrgb_x,
                            scl_cal_scale2(src_w, dst_w));
                VOP_SCL_SET(vop, win, scale_yrgb_y,
                            scl_cal_scale2(src_h, dst_h));
                if (is_yuv) {
                        VOP_SCL_SET(vop, win, scale_cbcr_x,
                                    scl_cal_scale2(cbcr_src_w, dst_w));
                        VOP_SCL_SET(vop, win, scale_cbcr_y,
                                    scl_cal_scale2(cbcr_src_h, dst_h));
                }
                return;
        }

        yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
        yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

        if (is_yuv) {
                cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
                cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
                if (cbcr_hor_scl_mode == SCALE_DOWN)
                        lb_mode = scl_vop_cal_lb_mode(dst_w, true);
                else
                        lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
        } else {
                if (yrgb_hor_scl_mode == SCALE_DOWN)
                        lb_mode = scl_vop_cal_lb_mode(dst_w, false);
                else
                        lb_mode = scl_vop_cal_lb_mode(src_w, false);
        }

        VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
        if (lb_mode == LB_RGB_3840X2) {
                if (yrgb_ver_scl_mode != SCALE_NONE) {
                        DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
                        return;
                }
                if (cbcr_ver_scl_mode != SCALE_NONE) {
                        DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
                        return;
                }
                vsu_mode = SCALE_UP_BIL;
        } else if (lb_mode == LB_RGB_2560X4) {
                vsu_mode = SCALE_UP_BIL;
        } else {
                vsu_mode = SCALE_UP_BIC;
        }

        val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
                                true, 0, NULL);
        VOP_SCL_SET(vop, win, scale_yrgb_x, val);
        val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
                                false, vsu_mode, &vskiplines);
        VOP_SCL_SET(vop, win, scale_yrgb_y, val);

        VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
        VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

        VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
        VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
        VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
        VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
        VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
        if (is_yuv) {
                val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
                                        dst_w, true, 0, NULL);
                VOP_SCL_SET(vop, win, scale_cbcr_x, val);
                val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
                                        dst_h, false, vsu_mode, &vskiplines);
                VOP_SCL_SET(vop, win, scale_cbcr_y, val);

                VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
                VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
                VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
                VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
                VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
                VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
                VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
        }
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
        unsigned long flags;

        if (WARN_ON(!vop->is_enabled))
                return;

        spin_lock_irqsave(&vop->irq_lock, flags);

        VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
        VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

        spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
        unsigned long flags;

        if (WARN_ON(!vop->is_enabled))
                return;

        spin_lock_irqsave(&vop->irq_lock, flags);

        VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

        spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get "LINE_FLAG" interrupt at the end of the active on screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 *    Interrupts
 * LINE_FLAG ------------------------------+
 * FRAME_SYNC ----+                        |
 *                |                        |
 *                v                        v
 *                | Vsync | Vbp |  Vactive | Vfp |
 *                        ^     ^          ^     ^
 *                        |     |          |     |
 *                        |     |          |     |
 * dsp_vs_end ------------+     |          |     |   VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+          |     |   VOP_DSP_VACT_ST_END
 * dsp_vact_end ---------------------------+     |   VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+  VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
        uint32_t line_flag_irq;
        unsigned long flags;

        spin_lock_irqsave(&vop->irq_lock, flags);

        line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

        spin_unlock_irqrestore(&vop->irq_lock, flags);

        return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
{
        unsigned long flags;

        if (WARN_ON(!vop->is_enabled))
                return;

        spin_lock_irqsave(&vop->irq_lock, flags);

        VOP_CTRL_SET(vop, line_flag_num[0], line_num);
        VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
        VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

        spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
        unsigned long flags;

        if (WARN_ON(!vop->is_enabled))
                return;

        spin_lock_irqsave(&vop->irq_lock, flags);

        VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

        spin_unlock_irqrestore(&vop->irq_lock, flags);
}

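/*
 * Power up the VOP: take a runtime PM reference, enable hclk/dclk/aclk,
 * attach the shared iommu mapping, restore the register file from the
 * regsbak shadow copy, then leave standby and enable the interrupt.
 */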
static int vop_enable(struct drm_crtc *crtc)
{
        struct vop *vop = to_vop(crtc);
        int ret;

        ret = pm_runtime_get_sync(vop->dev);
        if (ret < 0) {
                dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
                goto err_put_pm_runtime;
        }

        ret = clk_enable(vop->hclk);
        if (WARN_ON(ret < 0))
                goto err_put_pm_runtime;

        ret = clk_enable(vop->dclk);
        if (WARN_ON(ret < 0))
                goto err_disable_hclk;

        ret = clk_enable(vop->aclk);
        if (WARN_ON(ret < 0))
                goto err_disable_dclk;

        /*
         * Slave iommu shares power, irq and clock with vop.  It was associated
         * automatically with this master device via common driver code.
         * Now that we have enabled the clock we attach it to the shared drm
         * mapping.
         */
        ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
        if (ret) {
                dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
                goto err_disable_aclk;
        }

        memcpy(vop->regs, vop->regsbak, vop->len);
        /*
         * At this point the vop clock and iommu are enabled, so it is safe
         * to read and write the vop registers.
         */
        vop->is_enabled = true;

        spin_lock(&vop->reg_lock);

        VOP_CTRL_SET(vop, standby, 0);

        spin_unlock(&vop->reg_lock);

        enable_irq(vop->irq);

        drm_crtc_vblank_on(crtc);

        return 0;

err_disable_aclk:
        clk_disable(vop->aclk);
err_disable_dclk:
        clk_disable(vop->dclk);
err_disable_hclk:
        clk_disable(vop->hclk);
err_put_pm_runtime:
        pm_runtime_put_sync(vop->dev);
        return ret;
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
        struct vop *vop = to_vop(crtc);
        int i;

        WARN_ON(vop->event);

        rockchip_drm_psr_deactivate(&vop->crtc);

        /*
         * We need to make sure that all windows are disabled before we
         * disable the crtc. Otherwise we might try to scan from a destroyed
         * buffer later.
         */
        for (i = 0; i < vop->data->win_size; i++) {
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win = vop_win->data;

                spin_lock(&vop->reg_lock);
                VOP_WIN_SET(vop, win, enable, 0);
                spin_unlock(&vop->reg_lock);
        }

        drm_crtc_vblank_off(crtc);

        /*
         * Vop standby takes effect at the end of the current frame;
         * the dsp hold valid irq signals that standby is complete.
         *
         * We must wait for standby to complete before disabling aclk,
         * otherwise the memory bus may lock up.
         */
        reinit_completion(&vop->dsp_hold_completion);
        vop_dsp_hold_valid_irq_enable(vop);

        spin_lock(&vop->reg_lock);

        VOP_CTRL_SET(vop, standby, 1);

        spin_unlock(&vop->reg_lock);

        wait_for_completion(&vop->dsp_hold_completion);

        vop_dsp_hold_valid_irq_disable(vop);

        disable_irq(vop->irq);

        vop->is_enabled = false;

        /*
         * vop standby is complete, so the iommu detach is safe.
         */
        rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

        clk_disable(vop->dclk);
        clk_disable(vop->aclk);
        clk_disable(vop->hclk);
        pm_runtime_put(vop->dev);

        if (crtc->state->event && !crtc->state->active) {
                spin_lock_irq(&crtc->dev->event_lock);
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                spin_unlock_irq(&crtc->dev->event_lock);

                crtc->state->event = NULL;
        }
}

static void vop_plane_destroy(struct drm_plane *plane)
{
        drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
                                  struct drm_plane_state *state)
{
        struct drm_crtc *crtc = state->crtc;
        struct drm_crtc_state *crtc_state;
        struct drm_framebuffer *fb = state->fb;
        struct vop_win *vop_win = to_vop_win(plane);
        const struct vop_win_data *win = vop_win->data;
        int ret;
        struct drm_rect clip;
        int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
                                        DRM_PLANE_HELPER_NO_SCALING;
        int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
                                        DRM_PLANE_HELPER_NO_SCALING;

        if (!crtc || !fb)
                return 0;

        crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
        if (WARN_ON(!crtc_state))
                return -EINVAL;

        clip.x1 = 0;
        clip.y1 = 0;
        clip.x2 = crtc_state->adjusted_mode.hdisplay;
        clip.y2 = crtc_state->adjusted_mode.vdisplay;

        ret = drm_plane_helper_check_state(state, &clip,
                                           min_scale, max_scale,
                                           true, true);
        if (ret)
                return ret;

        if (!state->visible)
                return 0;

        ret = vop_convert_format(fb->format->format);
        if (ret < 0)
                return ret;

        /*
         * src.x1 can become odd after clipping, but the start point of a
         * YUV plane must be aligned to 2 pixels.
         */
        if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
                return -EINVAL;

        return 0;
}

static void vop_plane_atomic_disable(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
{
        struct vop_win *vop_win = to_vop_win(plane);
        const struct vop_win_data *win = vop_win->data;
        struct vop *vop = to_vop(old_state->crtc);

        if (!old_state->crtc)
                return;

        spin_lock(&vop->reg_lock);

        VOP_WIN_SET(vop, win, enable, 0);

        spin_unlock(&vop->reg_lock);
}

static void vop_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
{
        struct drm_plane_state *state = plane->state;
        struct drm_crtc *crtc = state->crtc;
        struct vop_win *vop_win = to_vop_win(plane);
        const struct vop_win_data *win = vop_win->data;
        struct vop *vop = to_vop(state->crtc);
        struct drm_framebuffer *fb = state->fb;
        unsigned int actual_w, actual_h;
        unsigned int dsp_stx, dsp_sty;
        uint32_t act_info, dsp_info, dsp_st;
        struct drm_rect *src = &state->src;
        struct drm_rect *dest = &state->dst;
        struct drm_gem_object *obj, *uv_obj;
        struct rockchip_gem_object *rk_obj, *rk_uv_obj;
        unsigned long offset;
        dma_addr_t dma_addr;
        uint32_t val;
        bool rb_swap;
        int format;

        /*
         * can't update plane when vop is disabled.
         */
        if (WARN_ON(!crtc))
                return;

        if (WARN_ON(!vop->is_enabled))
                return;

        if (!state->visible) {
                vop_plane_atomic_disable(plane, old_state);
                return;
        }

        obj = rockchip_fb_get_gem_obj(fb, 0);
        rk_obj = to_rockchip_obj(obj);

        actual_w = drm_rect_width(src) >> 16;
        actual_h = drm_rect_height(src) >> 16;
        act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

        dsp_info = (drm_rect_height(dest) - 1) << 16;
        dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

        dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
        dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
        dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

        offset = (src->x1 >> 16) * fb->format->cpp[0];
        offset += (src->y1 >> 16) * fb->pitches[0];
        dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

        format = vop_convert_format(fb->format->format);

        spin_lock(&vop->reg_lock);

        VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
        if (is_yuv_support(fb->format->format)) {
                int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
                int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
                int bpp = fb->format->cpp[1];

                uv_obj = rockchip_fb_get_gem_obj(fb, 1);
                rk_uv_obj = to_rockchip_obj(uv_obj);

                offset = (src->x1 >> 16) * bpp / hsub;
                offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

                dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
                VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
                VOP_WIN_SET(vop, win, uv_mst, dma_addr);
        }

        if (win->phy->scl)
                scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
                                    drm_rect_width(dest), drm_rect_height(dest),
                                    fb->format->format);

        VOP_WIN_SET(vop, win, act_info, act_info);
        VOP_WIN_SET(vop, win, dsp_info, dsp_info);
        VOP_WIN_SET(vop, win, dsp_st, dsp_st);

        rb_swap = has_rb_swapped(fb->format->format);
        VOP_WIN_SET(vop, win, rb_swap, rb_swap);

        if (is_alpha_support(fb->format->format)) {
                VOP_WIN_SET(vop, win, dst_alpha_ctl,
                            DST_FACTOR_M0(ALPHA_SRC_INVERSE));
                val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
                        SRC_ALPHA_M0(ALPHA_STRAIGHT) |
                        SRC_BLEND_M0(ALPHA_PER_PIX) |
                        SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
                        SRC_FACTOR_M0(ALPHA_ONE);
                VOP_WIN_SET(vop, win, src_alpha_ctl, val);
        } else {
                VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
        }

        VOP_WIN_SET(vop, win, enable, 1);
        spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
        .atomic_check = vop_plane_atomic_check,
        .atomic_update = vop_plane_atomic_update,
        .atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vop_plane_destroy,
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
        struct vop *vop = to_vop(crtc);
        unsigned long flags;

        if (WARN_ON(!vop->is_enabled))
                return -EPERM;

        spin_lock_irqsave(&vop->irq_lock, flags);

        VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
        VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

        spin_unlock_irqrestore(&vop->irq_lock, flags);

        return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
        struct vop *vop = to_vop(crtc);
        unsigned long flags;

        if (WARN_ON(!vop->is_enabled))
                return;

        spin_lock_irqsave(&vop->irq_lock, flags);

        VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

        spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
                                const struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode)
{
        struct vop *vop = to_vop(crtc);

        adjusted_mode->clock =
                clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

        return true;
}

static void vop_crtc_enable(struct drm_crtc *crtc)
{
        struct vop *vop = to_vop(crtc);
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
        u16 hdisplay = adjusted_mode->hdisplay;
        u16 htotal = adjusted_mode->htotal;
        u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
        u16 hact_end = hact_st + hdisplay;
        u16 vdisplay = adjusted_mode->vdisplay;
        u16 vtotal = adjusted_mode->vtotal;
        u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
        u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
        u16 vact_end = vact_st + vdisplay;
        uint32_t pin_pol, val;
        int ret;

        WARN_ON(vop->event);

        ret = vop_enable(crtc);
        if (ret) {
                DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
                return;
        }

        /*
         * If the dclk rate is zero, scanout has stopped and there is no
         * need to wait.
         */
        if (clk_get_rate(vop->dclk)) {
                /*
                 * The RK3288 vop timing registers take effect immediately;
                 * configuring the display timing while a frame is being
                 * displayed may cause tearing.
                 *
                 * Vop standby takes effect at the end of the current frame;
                 * the dsp hold valid irq signals that standby is complete.
                 *
                 * mode set:
                 *    standby and wait complete --> |----
                 *                                  | display time
                 *                                  |----
                 *                                  |---> dsp hold irq
                 *     configure display timing --> |
                 *         standby exit             |
                 *                                  | new frame start.
                 */

                reinit_completion(&vop->dsp_hold_completion);
                vop_dsp_hold_valid_irq_enable(vop);

                spin_lock(&vop->reg_lock);

                VOP_CTRL_SET(vop, standby, 1);

                spin_unlock(&vop->reg_lock);

                wait_for_completion(&vop->dsp_hold_completion);

                vop_dsp_hold_valid_irq_disable(vop);
        }

        pin_pol = 0x8;
        pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
        pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
        VOP_CTRL_SET(vop, pin_pol, pin_pol);

        switch (s->output_type) {
        case DRM_MODE_CONNECTOR_LVDS:
                VOP_CTRL_SET(vop, rgb_en, 1);
                VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
                break;
        case DRM_MODE_CONNECTOR_eDP:
                VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
                VOP_CTRL_SET(vop, edp_en, 1);
                break;
        case DRM_MODE_CONNECTOR_HDMIA:
                VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
                VOP_CTRL_SET(vop, hdmi_en, 1);
                break;
        case DRM_MODE_CONNECTOR_DSI:
                VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
                VOP_CTRL_SET(vop, mipi_en, 1);
                break;
        default:
                DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
                              s->output_type);
        }
        VOP_CTRL_SET(vop, out_mode, s->output_mode);

        VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
        val = hact_st << 16;
        val |= hact_end;
        VOP_CTRL_SET(vop, hact_st_end, val);
        VOP_CTRL_SET(vop, hpost_st_end, val);

        VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
        val = vact_st << 16;
        val |= vact_end;
        VOP_CTRL_SET(vop, vact_st_end, val);
        VOP_CTRL_SET(vop, vpost_st_end, val);

        clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

        VOP_CTRL_SET(vop, standby, 0);

        rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
        return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
        bool pending;
        int ret;

        /*
         * Spin until the frame start interrupt status bit goes low, which
         * means that the interrupt handler was invoked and cleared it. The
         * timeout of 10 msecs is really too long, but it is just a safety
         * measure if something goes really wrong. The wait will only happen
         * in the very unlikely case of a vblank happening exactly at the
         * same time and shouldn't exceed the microseconds range.
         */
        ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
                                        !pending, 0, 10 * 1000);
        if (ret)
                DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

        synchronize_irq(vop->irq);
}

static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
{
        struct drm_atomic_state *old_state = old_crtc_state->state;
        struct drm_plane_state *old_plane_state;
        struct vop *vop = to_vop(crtc);
        struct drm_plane *plane;
        int i;

        if (WARN_ON(!vop->is_enabled))
                return;

        spin_lock(&vop->reg_lock);

        vop_cfg_done(vop);

        spin_unlock(&vop->reg_lock);

        /*
         * There is a (rather unlikely) possibility that a vblank interrupt
         * fired before we set the cfg_done bit. To avoid spuriously
         * signalling flip completion we need to wait for it to finish.
         */
        vop_wait_for_irq_handler(vop);

        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
                WARN_ON(vop->event);

                vop->event = crtc->state->event;
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);

        for_each_plane_in_state(old_state, plane, old_plane_state, i) {
                if (!old_plane_state->fb)
                        continue;

                if (old_plane_state->fb == plane->state->fb)
                        continue;

                drm_framebuffer_reference(old_plane_state->fb);
                drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
                set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
        }
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
{
        rockchip_drm_psr_flush(crtc);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
        .enable = vop_crtc_enable,
        .disable = vop_crtc_disable,
        .mode_fixup = vop_crtc_mode_fixup,
        .atomic_flush = vop_crtc_atomic_flush,
        .atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
        if (crtc->state)
                __drm_atomic_helper_crtc_destroy_state(crtc->state);
        kfree(crtc->state);

        crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
        if (crtc->state)
                crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct rockchip_crtc_state *rockchip_state;

        rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
        if (!rockchip_state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
        return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
                                   struct drm_crtc_state *state)
{
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

        __drm_atomic_helper_crtc_destroy_state(&s->base);
        kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .destroy = vop_crtc_destroy,
        .reset = vop_crtc_reset,
        .atomic_duplicate_state = vop_crtc_duplicate_state,
        .atomic_destroy_state = vop_crtc_destroy_state,
        .enable_vblank = vop_crtc_enable_vblank,
        .disable_vblank = vop_crtc_disable_vblank,
};

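/*
 * Deferred framebuffer unreference: queued from the atomic flush path and
 * committed from the vblank handler once the old framebuffer is no longer
 * being scanned out (see VOP_PENDING_FB_UNREF).
 */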
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
        struct vop *vop = container_of(work, struct vop, fb_unref_work);
        struct drm_framebuffer *fb = val;

        drm_crtc_vblank_put(&vop->crtc);
        drm_framebuffer_unreference(fb);
}

static void vop_handle_vblank(struct vop *vop)
{
        struct drm_device *drm = vop->drm_dev;
        struct drm_crtc *crtc = &vop->crtc;
        unsigned long flags;

        spin_lock_irqsave(&drm->event_lock, flags);
        if (vop->event) {
                drm_crtc_send_vblank_event(crtc, vop->event);
                drm_crtc_vblank_put(crtc);
                vop->event = NULL;
        }
        spin_unlock_irqrestore(&drm->event_lock, flags);

        if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
                drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

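/*
 * Shared interrupt handler: latch and clear the active interrupt bits under
 * irq_lock, then handle the dsp_hold, line_flag and frame-start (vblank)
 * events outside the lock.
 */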
static irqreturn_t vop_isr(int irq, void *data)
{
        struct vop *vop = data;
        struct drm_crtc *crtc = &vop->crtc;
        uint32_t active_irqs;
        unsigned long flags;
        int ret = IRQ_NONE;

        /*
         * The interrupt register has interrupt status, enable and clear bits;
         * we must hold irq_lock to avoid a race with enable/disable_vblank().
         */
        spin_lock_irqsave(&vop->irq_lock, flags);

        active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
        /* Clear all active interrupt sources */
        if (active_irqs)
                VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

        spin_unlock_irqrestore(&vop->irq_lock, flags);

        /* This is expected for vop iommu irqs, since the irq is shared */
        if (!active_irqs)
                return IRQ_NONE;

        if (active_irqs & DSP_HOLD_VALID_INTR) {
                complete(&vop->dsp_hold_completion);
                active_irqs &= ~DSP_HOLD_VALID_INTR;
                ret = IRQ_HANDLED;
        }

        if (active_irqs & LINE_FLAG_INTR) {
                complete(&vop->line_flag_completion);
                active_irqs &= ~LINE_FLAG_INTR;
                ret = IRQ_HANDLED;
        }

        if (active_irqs & FS_INTR) {
                drm_crtc_handle_vblank(crtc);
                vop_handle_vblank(vop);
                active_irqs &= ~FS_INTR;
                ret = IRQ_HANDLED;
        }

        /* Unhandled irqs are spurious. */
        if (active_irqs)
                DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
                              active_irqs);

        return ret;
}

static int vop_create_crtc(struct vop *vop)
{
        const struct vop_data *vop_data = vop->data;
        struct device *dev = vop->dev;
        struct drm_device *drm_dev = vop->drm_dev;
        struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
        struct drm_crtc *crtc = &vop->crtc;
        struct device_node *port;
        int ret;
        int i;

        /*
         * Create drm_plane for primary and cursor planes first, since we need
         * to pass them to drm_crtc_init_with_planes, which sets the
         * "possible_crtcs" to the newly initialized crtc.
         */
        for (i = 0; i < vop_data->win_size; i++) {
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win_data = vop_win->data;

                if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
                    win_data->type != DRM_PLANE_TYPE_CURSOR)
                        continue;

                ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
                                               0, &vop_plane_funcs,
                                               win_data->phy->data_formats,
                                               win_data->phy->nformats,
                                               win_data->type, NULL);
                if (ret) {
                        DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
                                      ret);
                        goto err_cleanup_planes;
                }

                plane = &vop_win->base;
                drm_plane_helper_add(plane, &plane_helper_funcs);
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                        primary = plane;
                else if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        cursor = plane;
        }

        ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
                                        &vop_crtc_funcs, NULL);
        if (ret)
                goto err_cleanup_planes;

        drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

        /*
         * Create drm_planes for overlay windows with possible_crtcs restricted
         * to the newly created crtc.
         */
        for (i = 0; i < vop_data->win_size; i++) {
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win_data = vop_win->data;
                unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

                if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
                        continue;

                ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
                                               possible_crtcs,
                                               &vop_plane_funcs,
                                               win_data->phy->data_formats,
                                               win_data->phy->nformats,
                                               win_data->type, NULL);
                if (ret) {
                        DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
                                      ret);
                        goto err_cleanup_crtc;
                }
                drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
        }

        port = of_get_child_by_name(dev->of_node, "port");
        if (!port) {
                DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
                              dev->of_node->full_name);
                ret = -ENOENT;
                goto err_cleanup_crtc;
        }

        drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
                           vop_fb_unref_worker);

        init_completion(&vop->dsp_hold_completion);
        init_completion(&vop->line_flag_completion);
        crtc->port = port;

        return 0;

err_cleanup_crtc:
        drm_crtc_cleanup(crtc);
err_cleanup_planes:
        list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
                                 head)
                drm_plane_cleanup(plane);
        return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
        struct drm_crtc *crtc = &vop->crtc;
        struct drm_device *drm_dev = vop->drm_dev;
        struct drm_plane *plane, *tmp;

        of_node_put(crtc->port);

        /*
         * We need to cleanup the planes now.  Why?
         *
         * The planes are "&vop->win[i].base".  That means the memory is
         * all part of the big "struct vop" chunk of memory.  That memory
         * was devm allocated and associated with this component.  We need to
         * free it ourselves before vop_unbind() finishes.
         */
        list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
                                 head)
                vop_plane_destroy(plane);

        /*
         * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
         * references the CRTC.
         */
        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&vop->fb_unref_work);
}

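/*
 * One-time hardware init at bind time: look up and prepare the clocks,
 * pulse the AHB reset, program the per-SoC init table, disable all windows
 * and pulse the dclk reset so the configuration takes effect.
 */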
static int vop_initial(struct vop *vop)
{
        const struct vop_data *vop_data = vop->data;
        const struct vop_reg_data *init_table = vop_data->init_table;
        struct reset_control *ahb_rst;
        int i, ret;

        vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
        if (IS_ERR(vop->hclk)) {
                dev_err(vop->dev, "failed to get hclk source\n");
                return PTR_ERR(vop->hclk);
        }
        vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
        if (IS_ERR(vop->aclk)) {
                dev_err(vop->dev, "failed to get aclk source\n");
                return PTR_ERR(vop->aclk);
        }
        vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
        if (IS_ERR(vop->dclk)) {
                dev_err(vop->dev, "failed to get dclk source\n");
                return PTR_ERR(vop->dclk);
        }

        ret = clk_prepare(vop->dclk);
        if (ret < 0) {
                dev_err(vop->dev, "failed to prepare dclk\n");
                return ret;
        }

        /* Enable both the hclk and aclk to setup the vop */
        ret = clk_prepare_enable(vop->hclk);
        if (ret < 0) {
                dev_err(vop->dev, "failed to prepare/enable hclk\n");
                goto err_unprepare_dclk;
        }

        ret = clk_prepare_enable(vop->aclk);
        if (ret < 0) {
                dev_err(vop->dev, "failed to prepare/enable aclk\n");
                goto err_disable_hclk;
        }

        /*
         * Do hclk_reset to reset all of the vop registers.
         */
        ahb_rst = devm_reset_control_get(vop->dev, "ahb");
        if (IS_ERR(ahb_rst)) {
                dev_err(vop->dev, "failed to get ahb reset\n");
                ret = PTR_ERR(ahb_rst);
                goto err_disable_aclk;
        }
        reset_control_assert(ahb_rst);
        usleep_range(10, 20);
        reset_control_deassert(ahb_rst);

        memcpy(vop->regsbak, vop->regs, vop->len);

        for (i = 0; i < vop_data->table_size; i++)
                vop_writel(vop, init_table[i].offset, init_table[i].value);

        for (i = 0; i < vop_data->win_size; i++) {
                const struct vop_win_data *win = &vop_data->win[i];

                VOP_WIN_SET(vop, win, enable, 0);
        }

        vop_cfg_done(vop);

        /*
         * Do dclk_reset so that all of the configuration takes effect.
         */
        vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
        if (IS_ERR(vop->dclk_rst)) {
                dev_err(vop->dev, "failed to get dclk reset\n");
                ret = PTR_ERR(vop->dclk_rst);
                goto err_disable_aclk;
        }
        reset_control_assert(vop->dclk_rst);
        usleep_range(10, 20);
        reset_control_deassert(vop->dclk_rst);

        clk_disable(vop->hclk);
        clk_disable(vop->aclk);

        vop->is_enabled = false;

        return 0;

err_disable_aclk:
        clk_disable_unprepare(vop->aclk);
err_disable_hclk:
        clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
        clk_unprepare(vop->dclk);
        return ret;
}

/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
        const struct vop_data *vop_data = vop->data;
        unsigned int i;

        for (i = 0; i < vop_data->win_size; i++) {
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win_data = &vop_data->win[i];

                vop_win->data = win_data;
                vop_win->vop = vop;
        }
}

/**
 * rockchip_drm_wait_line_flag - wait for the given line flag event
 * @crtc: CRTC to enable the line flag on
 * @line_num: line number of interest
 * @mstimeout: timeout in milliseconds
 *
 * The caller blocks here until the requested line flag interrupt has fired
 * or the timeout expires.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
                                unsigned int mstimeout)
{
        struct vop *vop = to_vop(crtc);
        unsigned long jiffies_left;

        if (!crtc || !vop->is_enabled)
                return -ENODEV;

        if (line_num > crtc->mode.vtotal || mstimeout <= 0)
                return -EINVAL;

        if (vop_line_flag_irq_is_enabled(vop))
                return -EBUSY;

        reinit_completion(&vop->line_flag_completion);
        vop_line_flag_irq_enable(vop, line_num);

        jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
                                                   msecs_to_jiffies(mstimeout));
        vop_line_flag_irq_disable(vop);

        if (jiffies_left == 0) {
                dev_err(vop->dev, "Timeout waiting for IRQ\n");
                return -ETIMEDOUT;
        }

        return 0;
}
EXPORT_SYMBOL(rockchip_drm_wait_line_flag);

static int vop_bind(struct device *dev, struct device *master, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        const struct vop_data *vop_data;
        struct drm_device *drm_dev = data;
        struct vop *vop;
        struct resource *res;
        size_t alloc_size;
        int ret, irq;

        vop_data = of_device_get_match_data(dev);
        if (!vop_data)
                return -ENODEV;

        /* Allocate vop struct and its vop_win array */
        alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
        vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
        if (!vop)
                return -ENOMEM;

        vop->dev = dev;
        vop->data = vop_data;
        vop->drm_dev = drm_dev;
        dev_set_drvdata(dev, vop);

        vop_win_init(vop);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        vop->len = resource_size(res);
        vop->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(vop->regs))
                return PTR_ERR(vop->regs);

        vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
        if (!vop->regsbak)
                return -ENOMEM;

        ret = vop_initial(vop);
        if (ret < 0) {
                dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
                return ret;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "cannot find irq for vop\n");
                return irq;
        }
        vop->irq = (unsigned int)irq;

        spin_lock_init(&vop->reg_lock);
        spin_lock_init(&vop->irq_lock);

        mutex_init(&vop->vsync_mutex);

        ret = devm_request_irq(dev, vop->irq, vop_isr,
                               IRQF_SHARED, dev_name(dev), vop);
        if (ret)
                return ret;

        /* IRQ is initially disabled; it gets enabled in power_on */
        disable_irq(vop->irq);

        ret = vop_create_crtc(vop);
        if (ret)
                goto err_enable_irq;

        pm_runtime_enable(&pdev->dev);

        return 0;

err_enable_irq:
        enable_irq(vop->irq); /* To balance out the disable_irq above */
        return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
        struct vop *vop = dev_get_drvdata(dev);

        pm_runtime_disable(dev);
        vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
        .bind = vop_bind,
        .unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);