/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* protects the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of the vop registers */
	uint32_t len;

	/* only one process at a time may configure the registers */
	spinlock_t reg_lock;
	/* protects the vop irq registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory clk */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

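/*
 * Update a single bit-field within a VOP register. Registers that provide a
 * hardware write-mask get the field mask encoded into the upper 16 bits of
 * the written value; all other registers are read-modify-written through the
 * regsbak shadow copy so the untouched bits keep their cached value.
 */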
static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t shift, uint32_t v,
				  bool write_mask, bool relaxed)
{
	if (!mask)
		return;

	if (write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}

static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}

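/*
 * Program the per-window scaler. The luma (yrgb) path and, for YUV formats,
 * the chroma (cbcr) path each get their own scale factors and
 * up/down-scaling modes; windows without the extended scaler registers only
 * receive the basic scale factors.
 */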
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into
 *     dsp_line_frag_num, to get the "LINE_FLAG" interrupt at the end of the
 *     active on-screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		goto err_put_pm_runtime;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	vop_cfg_done(vop);

	/*
	 * At this point the vop clocks and the iommu are enabled, so reading
	 * and writing the vop registers is safe.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable that crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	vop_cfg_done(vop);

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby only takes effect at the end of the current frame;
	 * the dsp hold valid irq signals that standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may lock up.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * vop standby complete, so iommu detach is safe.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_state(state, &clip,
					   min_scale, max_scale,
					   true, true);
	if (ret)
		return ret;

	if (!state->visible)
		return 0;

	ret = vop_convert_format(fb->format->format);
	if (ret < 0)
		return ret;

	/*
	 * src.x1 can become odd after clipping, but the start point of a YUV
	 * plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
		return -EINVAL;

	return 0;
}

static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);
}

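/*
 * Translate the checked plane state into window register writes: source and
 * destination geometry, scan-out addresses for the yrgb (and, for YUV
 * formats, the uv) buffers, optional scaling and per-pixel alpha. The writes
 * are relaxed and only take effect on the next vop_cfg_done().
 */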
static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int format;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!state->visible) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * fb->format->cpp[0];
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

	format = vop_convert_format(fb->format->format);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	if (is_yuv_support(fb->format->format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
		int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
		int bpp = fb->format->cpp[1];

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->format->format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->format->format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
};

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}

static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	/*
	 * If the dclk rate is zero, scanout has already stopped and there is
	 * nothing to wait for.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The RK3288 vop timing registers take effect immediately;
		 * reprogramming the display timing while scanning out may
		 * cause tearing.
		 *
		 * Vop standby only takes effect at the end of the current
		 * frame; the dsp hold valid irq signals that standby is
		 * complete.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                  | display time
		 *                                  |----
		 *                                  |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */

		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	pin_pol = 0x8;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it. The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong. The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

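/*
 * Commit the new frame: latch all relaxed register writes with cfg_done,
 * make sure a racing frame-start interrupt has been fully handled, arm the
 * pending vblank event and queue any replaced framebuffers so they are
 * unreferenced after the next vblank.
 */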
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == plane->state->fb)
			continue;

		drm_framebuffer_reference(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	rockchip_drm_psr_flush(crtc);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};

static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_unreference(fb);
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

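/*
 * The VOP interrupt line is shared with the IOMMU, so returning IRQ_NONE
 * when no VOP interrupt is active is expected. Frame-start interrupts drive
 * vblank handling, while DSP_HOLD and LINE_FLAG complete the waiters in the
 * crtc disable and line-flag paths.
 */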
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * The interrupt register has interrupt status, enable and clear bits;
	 * we must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

	return ret;
}

static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
				      ret);
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
				      ret);
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
			      dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
			   vop_fb_unref_worker);

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->line_flag_completion);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now.  Why?
	 *
	 * The planes are "&vop->win[i].base".  That means the memory is
	 * all part of the big "struct vop" chunk of memory.  That memory
	 * was devm allocated and associated with this component.  We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}

static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		return ret;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * do hclk_reset, reset all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * do dclk_reset, let all config take effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
	return ret;
}

/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

/**
 * rockchip_drm_wait_line_flag - wait for the given line flag event
 * @crtc: CRTC to enable the line flag on
 * @line_num: line number of interest
 * @mstimeout: timeout in milliseconds
 *
 * Block until the line flag interrupt for the requested line has fired, or
 * until the timeout expires.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
				unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	if (line_num > crtc->mode.vtotal || mstimeout <= 0)
		return -EINVAL;

	if (vop_line_flag_irq_is_enabled(vop))
		return -EBUSY;

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop, line_num);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		dev_err(vop->dev, "Timeout waiting for IRQ\n");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_drm_wait_line_flag);

static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		goto err_enable_irq;

	pm_runtime_enable(&pdev->dev);

	return 0;

err_enable_irq:
	enable_irq(vop->irq); /* To balance out the disable_irq above */
	return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);