/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

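/*
 * Bits for vop->pending: VOP_PENDING_FB_UNREF is set in atomic_flush when
 * an old framebuffer has been queued on fb_unref_work and must only be
 * dropped after the next vblank.
 */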
enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* mutex for the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of vop register */
	uint32_t len;

	/* only one process may configure the registers at a time */
	spinlock_t reg_lock;
	/* protects the vop interrupt registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory (axi) clk */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

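/*
 * Read-modify-write of a single register field.  Registers that provide a
 * write-enable mask (write_mask) encode the enable bits in the upper 16
 * bits and need no read-back; all other registers are updated through the
 * cached copy in regsbak so the current value never has to be read from
 * hardware.
 */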
static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t shift, uint32_t v,
				  bool write_mask, bool relaxed)
{
	if (!mask)
		return;

	if (write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}

static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

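/*
 * Compute the fixed-point scale factor for one direction.  Horizontal
 * scaling uses bicubic filtering when scaling up and bilinear when scaling
 * down; vertical scaling picks the up-scaling filter from vsu_mode and,
 * when a vskiplines pointer is supplied, may combine bilinear down-scaling
 * with line skipping.
 */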
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}

static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get "LINE_FLAG" interrupt at the end of the active on screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

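/*
 * Power the VOP back up: take a runtime PM reference, enable the clocks,
 * attach the shared iommu mapping, restore the register state saved in
 * regsbak and finally leave standby.
 */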
static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * At this point the vop clock & iommu are enabled, so it is safe to
	 * read and write the vop registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable the crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby takes effect at the end of the current frame;
	 * the dsp hold valid irq signals that standby has completed.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * vop standby is complete, so iommu detach is safe.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_state(state, &clip,
					   min_scale, max_scale,
					   true, true);
	if (ret)
		return ret;

	if (!state->visible)
		return 0;

	ret = vop_convert_format(fb->pixel_format);
	if (ret < 0)
		return ret;

	/*
	 * Src.x1 can be odd after clipping, but the start point of a YUV
	 * plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2))
		return -EINVAL;

	return 0;
}

static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);
}

static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int format;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!state->visible) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

	format = vop_convert_format(fb->pixel_format);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	if (is_yuv_support(fb->pixel_format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->pixel_format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->pixel_format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->pixel_format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
};

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

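	/*
	 * Round the requested pixel clock to a rate the dclk can actually
	 * generate, so the mode we report back matches what clk_set_rate()
	 * will program later in vop_crtc_enable().
	 */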
	adjusted_mode->clock =
		DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
			     1000);

	return true;
}

static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	/*
	 * If the dclk rate is zero, scanout has stopped and there is no
	 * need to wait.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The RK3288 vop timing registers take effect immediately;
		 * reprogramming the display timing while the display is
		 * active may cause tearing.
		 *
		 * Vop standby takes effect at the end of the current frame;
		 * the dsp hold valid irq signals that standby has completed.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                   | display time
		 *                                   |----
		 *                                   |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */

		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	pin_pol = 0x8;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it. The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong. The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == plane->state->fb)
			continue;

		drm_framebuffer_reference(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	rockchip_drm_psr_flush(crtc);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};

static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_unreference(fb);
}

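/*
 * Called from the frame start interrupt: send any queued vblank event and,
 * if a framebuffer unref was deferred in atomic_flush, commit the flip work
 * so the buffer is released outside of irq context.
 */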
static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * The interrupt register has interrupt status, enable and clear bits;
	 * we must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

	return ret;
}

static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
				      ret);
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
				      ret);
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
			      dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
			   vop_fb_unref_worker);

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->line_flag_completion);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now. Why?
	 *
	 * The planes are "&vop->win[i].base". That means the memory is
	 * all part of the big "struct vop" chunk of memory. That memory
	 * was devm allocated and associated with this component. We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}

1329static int vop_initial(struct vop *vop)
1330{
1331 const struct vop_data *vop_data = vop->data;
1332 const struct vop_reg_data *init_table = vop_data->init_table;
1333 struct reset_control *ahb_rst;
1334 int i, ret;
1335
1336 vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
1337 if (IS_ERR(vop->hclk)) {
1338 dev_err(vop->dev, "failed to get hclk source\n");
1339 return PTR_ERR(vop->hclk);
1340 }
1341 vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
1342 if (IS_ERR(vop->aclk)) {
1343 dev_err(vop->dev, "failed to get aclk source\n");
1344 return PTR_ERR(vop->aclk);
1345 }
1346 vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
1347 if (IS_ERR(vop->dclk)) {
1348 dev_err(vop->dev, "failed to get dclk source\n");
1349 return PTR_ERR(vop->dclk);
1350 }
1351
Jeffy Chen543a1812017-04-06 20:31:20 +08001352 ret = pm_runtime_get_sync(vop->dev);
1353 if (ret < 0) {
1354 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
1355 return ret;
1356 }
1357
Mark Yao2048e322014-08-22 18:36:26 +08001358 ret = clk_prepare(vop->dclk);
1359 if (ret < 0) {
1360 dev_err(vop->dev, "failed to prepare dclk\n");
Jeffy Chen543a1812017-04-06 20:31:20 +08001361 goto err_put_pm_runtime;
Mark Yao2048e322014-08-22 18:36:26 +08001362 }
1363
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001364 /* Enable both the hclk and aclk to setup the vop */
1365 ret = clk_prepare_enable(vop->hclk);
Mark Yao2048e322014-08-22 18:36:26 +08001366 if (ret < 0) {
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001367 dev_err(vop->dev, "failed to prepare/enable hclk\n");
Mark Yao2048e322014-08-22 18:36:26 +08001368 goto err_unprepare_dclk;
1369 }
1370
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001371 ret = clk_prepare_enable(vop->aclk);
Mark Yao2048e322014-08-22 18:36:26 +08001372 if (ret < 0) {
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001373 dev_err(vop->dev, "failed to prepare/enable aclk\n");
1374 goto err_disable_hclk;
Mark Yao2048e322014-08-22 18:36:26 +08001375 }
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001376
Mark Yao2048e322014-08-22 18:36:26 +08001377 /*
1378 * do hclk_reset, reset all vop registers.
1379 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

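	/*
	 * Start from a known interrupt state: clear anything pending and
	 * leave every interrupt source disabled.
	 */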
	VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
	VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);

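	/*
	 * Cache the just-reset register contents in regsbak; the masked
	 * register helpers read-modify-write against this shadow copy.
	 */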
	memcpy(vop->regsbak, vop->regs, vop->len);

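	/* Apply the SoC-specific register defaults from the init table. */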
	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

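	/*
	 * Disable all windows so nothing (e.g. a bootloader splash screen)
	 * keeps scanning out.
	 */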
	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

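	/* Latch the shadowed register updates above into the hardware. */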
	vop_cfg_done(vop);

	/*
	 * Do the dclk reset so that all of the configuration above takes
	 * effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

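	/*
	 * Leave hclk and aclk prepared but gated again; they are re-enabled
	 * when the CRTC is powered on.
	 */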
	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	pm_runtime_put_sync(vop->dev);

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

/**
 * rockchip_drm_wait_line_flag - wait for the given line flag event
 * @crtc: CRTC to enable the line flag on
 * @line_num: scanline number of interest
 * @mstimeout: timeout in milliseconds
 *
 * Blocks until the line flag interrupt for the requested scanline has
 * fired, or until the timeout expires.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
				unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	if (line_num > crtc->mode.vtotal || mstimeout <= 0)
		return -EINVAL;

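	/* Only one waiter at a time: bail out if the line flag IRQ is busy. */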
	if (vop_line_flag_irq_is_enabled(vop))
		return -EBUSY;

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop, line_num);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		dev_err(vop->dev, "Timeout waiting for IRQ\n");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
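
/*
 * A hypothetical caller (for instance an encoder driver preparing a PSR
 * transition) could use this helper to line register updates up with the
 * scanout position, roughly:
 *
 *	if (rockchip_drm_wait_line_flag(encoder->crtc, line, 100))
 *		dev_warn(dev, "line flag wait timed out\n");
 *
 * "encoder", "line" and the 100 ms timeout above are illustrative only.
 */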

static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

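	/*
	 * reg_lock serialises VOP register updates; irq_lock protects the
	 * interrupt enable/status registers.
	 */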
	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = vop_create_crtc(vop);
	if (ret)
		return ret;

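	/*
	 * Runtime PM has to be enabled before vop_initial(), which takes a
	 * temporary pm_runtime reference while programming the hardware.
	 */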
	pm_runtime_enable(&pdev->dev);

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
		goto err_disable_pm_runtime;
	}

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		goto err_disable_pm_runtime;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	return 0;

err_disable_pm_runtime:
	pm_runtime_disable(&pdev->dev);
	vop_destroy_crtc(vop);
	return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);