/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
	REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
	REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
	REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
	REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)

enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_plane_state {
	struct drm_plane_state base;
	int format;
	dma_addr_t yrgb_mst;
	bool enable;
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;

	/* protected by dev->event_lock */
	bool enable;
	dma_addr_t yrgb_mst;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;
	bool vblank_active;

	/* protects the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;
	struct completion wait_update_complete;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical length of the vop register map */
	uint32_t len;

	/* only one process may configure the registers at a time */
	spinlock_t reg_lock;
	/* protects the vop interrupt registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory (axi) clk */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

162
Mark Yao2048e322014-08-22 18:36:26 +0800163static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
164{
165 writel(v, vop->regs + offset);
166 vop->regsbak[offset >> 2] = v;
167}
168
169static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
170{
171 return readl(vop->regs + offset);
172}
173
174static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
175 const struct vop_reg *reg)
176{
177 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
178}
179
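/*
 * Read-modify-write helper for VOP registers.  Registers that provide a
 * hardware write-mask take the value in the low 16 bits and the mask in the
 * upper 16 bits; all other registers are merged with the cached copy kept in
 * regsbak so that untouched bit fields are preserved.  The final store uses
 * writel_relaxed() when "relaxed" is set.
 */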
static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t shift, uint32_t v,
				  bool write_mask, bool relaxed)
{
	if (!mask)
		return;

	if (write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}

static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}

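/*
 * Program the scaler for one window: pick the line-buffer mode, derive the
 * scale modes for the Y/RGB and CbCr planes and write the resulting scale
 * factors.  Windows without the extended scaler register set only get the
 * basic yrgb/cbcr coefficients.
 */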
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse, which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get the "LINE_FLAG" interrupt at the end of the active on-screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 *   Interrupts
 *              LINE_FLAG -------------------------------+
 *              FRAME_SYNC ----+                         |
 *                             |                         |
 *                             v                         v
 *                        | Vsync | Vbp |  Vactive   | Vfp |
 *                         ^      ^     ^            ^
 *                         |      |     |            |
 *                         |      |     |            |
 * dsp_vs_end -------------+      |     |            |     VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start ----------------+     |            |     VOP_DSP_VACT_ST_END
 * dsp_vact_end ------------------------+            |     VOP_DSP_VACT_ST_END
 * dsp_total -----------------------------------------+    VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

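/*
 * Power up the VOP: take a runtime PM reference, enable the clocks, attach
 * the shared iommu mapping, restore the register image from regsbak and
 * bring the controller out of standby.  Called from vop_crtc_enable().
 */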
static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		goto err_put_pm_runtime;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * The VOP clocks and iommu are now enabled, so it is safe to read
	 * and write the VOP registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable the crtc.  Otherwise we might try to scan out from a
	 * destroyed buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	drm_crtc_vblank_off(crtc);

	/*
	 * VOP standby takes effect at the end of the current frame; the
	 * dsp hold valid irq signals that standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * VOP standby is complete, so it is now safe to detach from the iommu.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

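/*
 * Validate a plane update: clip against the CRTC mode, check the scaling
 * limits and the pixel format, and require 2-pixel alignment for the start
 * point of YUV framebuffers.  A state without a crtc or fb simply marks the
 * plane as disabled.
 */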
static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		goto out_disable;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_state(state, &clip,
					   min_scale, max_scale,
					   true, true);
	if (ret)
		return ret;

	if (!state->visible)
		goto out_disable;

	vop_plane_state->format = vop_convert_format(fb->pixel_format);
	if (vop_plane_state->format < 0)
		return vop_plane_state->format;

	/*
	 * src.x1 can become odd after clipping, but the start point of a YUV
	 * plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2))
		return -EINVAL;

	vop_plane_state->enable = true;

	return 0;

out_disable:
	vop_plane_state->enable = false;
	return 0;
}

static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock_irq(&plane->dev->event_lock);
	vop_win->enable = false;
	vop_win->yrgb_mst = 0;
	spin_unlock_irq(&plane->dev->event_lock);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);

	vop_plane_state->enable = false;
}

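/*
 * Write the new framebuffer addresses, geometry, scaling and alpha settings
 * for one window.  The window registers are only armed here; they take
 * effect once vop_cfg_done() is written in the CRTC's atomic_flush.
 */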
static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!vop_plane_state->enable) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
	offset += (src->y1 >> 16) * fb->pitches[0];
	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];

	spin_lock_irq(&plane->dev->event_lock);
	vop_win->enable = true;
	vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
	spin_unlock_irq(&plane->dev->event_lock);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
	if (is_yuv_support(fb->pixel_format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->pixel_format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->pixel_format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->pixel_format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

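/* Free any existing plane state and install a zeroed vop_plane_state. */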
static void vop_atomic_plane_reset(struct drm_plane *plane)
{
	struct vop_plane_state *vop_plane_state =
					to_vop_plane_state(plane->state);

	if (plane->state && plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);

	kfree(vop_plane_state);
	vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return;

	plane->state = &vop_plane_state->base;
	plane->state->plane = plane;
}

static struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
	struct vop_plane_state *old_vop_plane_state;
	struct vop_plane_state *vop_plane_state;

	if (WARN_ON(!plane->state))
		return NULL;

	old_vop_plane_state = to_vop_plane_state(plane->state);
	vop_plane_state = kmemdup(old_vop_plane_state,
				  sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane,
						  &vop_plane_state->base);

	return &vop_plane_state->base;
}

static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct vop_plane_state *vop_state = to_vop_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);

	kfree(vop_state);
}

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = vop_atomic_plane_reset,
	.atomic_duplicate_state = vop_atomic_plane_duplicate_state,
	.atomic_destroy_state = vop_atomic_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);

	reinit_completion(&vop->wait_update_complete);
	WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.wait_for_update = vop_crtc_wait_for_update,
};

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}

static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	/*
	 * If the dclk rate is zero, scanout has stopped and there is
	 * nothing to wait for.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The RK3288 VOP timing registers take effect immediately;
		 * reprogramming the display timing while the display is
		 * active may cause tearing.
		 *
		 * VOP standby takes effect at the end of the current frame;
		 * the dsp hold valid irq signals that standby is complete.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                  | display time
		 *                                  |----
		 *                                  |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */

		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	pin_pol = 0x8;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until the frame start interrupt status bit goes low, which
	 * means that the interrupt handler was invoked and cleared it. The
	 * timeout of 10 msecs is really too long, but it is just a safety
	 * measure if something goes really wrong. The wait will only happen
	 * in the very unlikely case of a vblank happening exactly at the same
	 * time and shouldn't exceed the microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

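/*
 * Commit the frame: write cfg_done so the armed window/control registers are
 * latched at the next frame start, then queue the old framebuffers for
 * deferred unreferencing once the flip has actually happened.
 */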
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == plane->state->fb)
			continue;

		drm_framebuffer_reference(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct vop *vop = to_vop(crtc);

	rockchip_drm_psr_flush(crtc);

	spin_lock_irq(&crtc->dev->event_lock);
	vop->vblank_active = true;
	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	WARN_ON(vop->event);

	if (crtc->state->event) {
		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};

static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_unreference(fb);
}

static bool vop_win_pending_is_complete(struct vop_win *vop_win)
{
	dma_addr_t yrgb_mst;

	if (!vop_win->enable)
		return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;

	yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);

	return yrgb_mst == vop_win->yrgb_mst;
}

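/*
 * Called from the frame start interrupt.  Once every window has latched its
 * pending update, deliver the vblank event, drop the vblank reference taken
 * in atomic_begin, complete the wait_update completion and kick the deferred
 * framebuffer unref work.
 */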
static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;
	int i;

	for (i = 0; i < vop->data->win_size; i++) {
		if (!vop_win_pending_is_complete(&vop->win[i]))
			return;
	}

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		vop->event = NULL;
	}
	if (vop->vblank_active) {
		vop->vblank_active = false;
		drm_crtc_vblank_put(crtc);
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (!completion_done(&vop->wait_update_complete))
		complete(&vop->wait_update_complete);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

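/*
 * Shared interrupt handler: clear the active sources under irq_lock, then
 * dispatch the dsp_hold, line_flag and frame start interrupts to their
 * completions and the vblank handling.
 */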
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * The interrupt register has interrupt status, enable and clear bits;
	 * we must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

	return ret;
}

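/*
 * Register the CRTC and its planes with DRM.  Primary and cursor windows are
 * created first so they can be passed to drm_crtc_init_with_planes(); the
 * remaining windows become overlay planes restricted to this CRTC.
 */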
1313static int vop_create_crtc(struct vop *vop)
1314{
1315 const struct vop_data *vop_data = vop->data;
1316 struct device *dev = vop->dev;
1317 struct drm_device *drm_dev = vop->drm_dev;
Douglas Anderson328b51c2016-03-07 14:00:52 -08001318 struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
Mark Yao2048e322014-08-22 18:36:26 +08001319 struct drm_crtc *crtc = &vop->crtc;
1320 struct device_node *port;
1321 int ret;
1322 int i;
1323
1324 /*
1325 * Create drm_plane for primary and cursor planes first, since we need
1326 * to pass them to drm_crtc_init_with_planes, which sets the
1327 * "possible_crtcs" to the newly initialized crtc.
1328 */
1329 for (i = 0; i < vop_data->win_size; i++) {
1330 struct vop_win *vop_win = &vop->win[i];
1331 const struct vop_win_data *win_data = vop_win->data;
1332
1333 if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
1334 win_data->type != DRM_PLANE_TYPE_CURSOR)
1335 continue;
1336
1337 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1338 0, &vop_plane_funcs,
1339 win_data->phy->data_formats,
1340 win_data->phy->nformats,
Ville Syrjäläb0b3b792015-12-09 16:19:55 +02001341 win_data->type, NULL);
Mark Yao2048e322014-08-22 18:36:26 +08001342 if (ret) {
Sean Paulee4d7892016-08-12 13:00:54 -04001343 DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
1344 ret);
Mark Yao2048e322014-08-22 18:36:26 +08001345 goto err_cleanup_planes;
1346 }
1347
1348 plane = &vop_win->base;
Mark Yao63ebb9f2015-11-30 18:22:42 +08001349 drm_plane_helper_add(plane, &plane_helper_funcs);
Mark Yao2048e322014-08-22 18:36:26 +08001350 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1351 primary = plane;
1352 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
1353 cursor = plane;
1354 }
1355
1356 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
Ville Syrjäläf9882872015-12-09 16:19:31 +02001357 &vop_crtc_funcs, NULL);
Mark Yao2048e322014-08-22 18:36:26 +08001358 if (ret)
Douglas Anderson328b51c2016-03-07 14:00:52 -08001359 goto err_cleanup_planes;
Mark Yao2048e322014-08-22 18:36:26 +08001360
1361 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
1362
1363 /*
1364 * Create drm_planes for overlay windows with possible_crtcs restricted
1365 * to the newly created crtc.
1366 */
1367 for (i = 0; i < vop_data->win_size; i++) {
1368 struct vop_win *vop_win = &vop->win[i];
1369 const struct vop_win_data *win_data = vop_win->data;
1370 unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
1371
1372 if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
1373 continue;
1374
1375 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1376 possible_crtcs,
1377 &vop_plane_funcs,
1378 win_data->phy->data_formats,
1379 win_data->phy->nformats,
Ville Syrjäläb0b3b792015-12-09 16:19:55 +02001380 win_data->type, NULL);
Mark Yao2048e322014-08-22 18:36:26 +08001381 if (ret) {
Sean Paulee4d7892016-08-12 13:00:54 -04001382			DRM_DEV_ERROR(vop->dev, "failed to init overlay plane: %d\n",
1383 ret);
Mark Yao2048e322014-08-22 18:36:26 +08001384 goto err_cleanup_crtc;
1385 }
Mark Yao63ebb9f2015-11-30 18:22:42 +08001386 drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
Mark Yao2048e322014-08-22 18:36:26 +08001387 }
1388
1389 port = of_get_child_by_name(dev->of_node, "port");
1390 if (!port) {
Sean Paulee4d7892016-08-12 13:00:54 -04001391 DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
1392 dev->of_node->full_name);
Douglas Anderson328b51c2016-03-07 14:00:52 -08001393 ret = -ENOENT;
Mark Yao2048e322014-08-22 18:36:26 +08001394 goto err_cleanup_crtc;
1395 }
1396
Tomasz Figa47a7eb42016-09-14 21:54:57 +09001397 drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
1398 vop_fb_unref_worker);
1399
Mark Yao10672192015-02-04 13:10:31 +08001400 init_completion(&vop->dsp_hold_completion);
Mark Yao63ebb9f2015-11-30 18:22:42 +08001401 init_completion(&vop->wait_update_complete);
Yakir Yang69c34e42016-07-24 14:57:40 +08001402 init_completion(&vop->line_flag_completion);
Mark Yao2048e322014-08-22 18:36:26 +08001403 crtc->port = port;
Mark Yaob5f7b752015-11-23 15:21:08 +08001404 rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
Mark Yao2048e322014-08-22 18:36:26 +08001405
1406 return 0;
1407
1408err_cleanup_crtc:
1409 drm_crtc_cleanup(crtc);
1410err_cleanup_planes:
Douglas Anderson328b51c2016-03-07 14:00:52 -08001411 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
1412 head)
Mark Yao2048e322014-08-22 18:36:26 +08001413 drm_plane_cleanup(plane);
1414 return ret;
1415}
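
/*
 * Aside, as a sketch only (this describes DRM core behaviour and is not
 * code from this driver): the primary and cursor planes must exist before
 * drm_crtc_init_with_planes() is called because the core binds them to the
 * CRTC and derives their possible_crtcs mask there, roughly equivalent to
 * the hypothetical helper below.
 */
static void __maybe_unused example_bind_planes(struct drm_crtc *crtc,
					       struct drm_plane *primary,
					       struct drm_plane *cursor)
{
	uint32_t mask = 1 << drm_crtc_index(crtc);

	crtc->primary = primary;
	crtc->cursor = cursor;
	if (primary)
		primary->possible_crtcs = mask;
	if (cursor)
		cursor->possible_crtcs = mask;
}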
1416
1417static void vop_destroy_crtc(struct vop *vop)
1418{
1419 struct drm_crtc *crtc = &vop->crtc;
Douglas Anderson328b51c2016-03-07 14:00:52 -08001420 struct drm_device *drm_dev = vop->drm_dev;
1421 struct drm_plane *plane, *tmp;
Mark Yao2048e322014-08-22 18:36:26 +08001422
Mark Yaob5f7b752015-11-23 15:21:08 +08001423 rockchip_unregister_crtc_funcs(crtc);
Mark Yao2048e322014-08-22 18:36:26 +08001424 of_node_put(crtc->port);
Douglas Anderson328b51c2016-03-07 14:00:52 -08001425
1426 /*
 1427	 * We need to clean up the planes now. Why?
1428 *
1429 * The planes are "&vop->win[i].base". That means the memory is
1430 * all part of the big "struct vop" chunk of memory. That memory
1431 * was devm allocated and associated with this component. We need to
1432 * free it ourselves before vop_unbind() finishes.
1433 */
1434 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
1435 head)
1436 vop_plane_destroy(plane);
1437
1438 /*
1439 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
1440 * references the CRTC.
1441 */
Mark Yao2048e322014-08-22 18:36:26 +08001442 drm_crtc_cleanup(crtc);
Tomasz Figa47a7eb42016-09-14 21:54:57 +09001443 drm_flip_work_cleanup(&vop->fb_unref_work);
Mark Yao2048e322014-08-22 18:36:26 +08001444}
1445
1446static int vop_initial(struct vop *vop)
1447{
1448 const struct vop_data *vop_data = vop->data;
1449 const struct vop_reg_data *init_table = vop_data->init_table;
1450 struct reset_control *ahb_rst;
1451 int i, ret;
1452
1453 vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
1454 if (IS_ERR(vop->hclk)) {
1455 dev_err(vop->dev, "failed to get hclk source\n");
1456 return PTR_ERR(vop->hclk);
1457 }
1458 vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
1459 if (IS_ERR(vop->aclk)) {
1460 dev_err(vop->dev, "failed to get aclk source\n");
1461 return PTR_ERR(vop->aclk);
1462 }
1463 vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
1464 if (IS_ERR(vop->dclk)) {
1465 dev_err(vop->dev, "failed to get dclk source\n");
1466 return PTR_ERR(vop->dclk);
1467 }
1468
Mark Yao2048e322014-08-22 18:36:26 +08001469 ret = clk_prepare(vop->dclk);
1470 if (ret < 0) {
1471 dev_err(vop->dev, "failed to prepare dclk\n");
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001472 return ret;
Mark Yao2048e322014-08-22 18:36:26 +08001473 }
1474
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001475	/* Enable both the hclk and aclk to set up the VOP */
1476 ret = clk_prepare_enable(vop->hclk);
Mark Yao2048e322014-08-22 18:36:26 +08001477 if (ret < 0) {
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001478 dev_err(vop->dev, "failed to prepare/enable hclk\n");
Mark Yao2048e322014-08-22 18:36:26 +08001479 goto err_unprepare_dclk;
1480 }
1481
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001482 ret = clk_prepare_enable(vop->aclk);
Mark Yao2048e322014-08-22 18:36:26 +08001483 if (ret < 0) {
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001484 dev_err(vop->dev, "failed to prepare/enable aclk\n");
1485 goto err_disable_hclk;
Mark Yao2048e322014-08-22 18:36:26 +08001486 }
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001487
Mark Yao2048e322014-08-22 18:36:26 +08001488 /*
 1489	 * Do the hclk reset, which resets all VOP registers.
1490 */
1491 ahb_rst = devm_reset_control_get(vop->dev, "ahb");
1492 if (IS_ERR(ahb_rst)) {
1493 dev_err(vop->dev, "failed to get ahb reset\n");
1494 ret = PTR_ERR(ahb_rst);
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001495 goto err_disable_aclk;
Mark Yao2048e322014-08-22 18:36:26 +08001496 }
1497 reset_control_assert(ahb_rst);
1498 usleep_range(10, 20);
1499 reset_control_deassert(ahb_rst);
1500
1501 memcpy(vop->regsbak, vop->regs, vop->len);
1502
1503 for (i = 0; i < vop_data->table_size; i++)
1504 vop_writel(vop, init_table[i].offset, init_table[i].value);
1505
1506 for (i = 0; i < vop_data->win_size; i++) {
1507 const struct vop_win_data *win = &vop_data->win[i];
1508
1509 VOP_WIN_SET(vop, win, enable, 0);
1510 }
1511
1512 vop_cfg_done(vop);
1513
1514 /*
 1515	 * Do the dclk reset so that all of the configuration takes effect.
1516 */
1517 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
1518 if (IS_ERR(vop->dclk_rst)) {
1519 dev_err(vop->dev, "failed to get dclk reset\n");
1520 ret = PTR_ERR(vop->dclk_rst);
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001521 goto err_disable_aclk;
Mark Yao2048e322014-08-22 18:36:26 +08001522 }
1523 reset_control_assert(vop->dclk_rst);
1524 usleep_range(10, 20);
1525 reset_control_deassert(vop->dclk_rst);
1526
1527 clk_disable(vop->hclk);
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001528 clk_disable(vop->aclk);
Mark Yao2048e322014-08-22 18:36:26 +08001529
Mark Yao31e980c2015-01-22 14:37:56 +08001530 vop->is_enabled = false;
Sean Paul5b680402016-08-10 16:24:39 -04001531 vop->vblank_active = false;
Mark Yao2048e322014-08-22 18:36:26 +08001532
1533 return 0;
1534
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001535err_disable_aclk:
1536 clk_disable_unprepare(vop->aclk);
Mark Yao2048e322014-08-22 18:36:26 +08001537err_disable_hclk:
Sjoerd Simonsd7b53fd2015-11-06 13:22:24 +01001538 clk_disable_unprepare(vop->hclk);
Mark Yao2048e322014-08-22 18:36:26 +08001539err_unprepare_dclk:
1540 clk_unprepare(vop->dclk);
Mark Yao2048e322014-08-22 18:36:26 +08001541 return ret;
1542}
1543
1544/*
1545 * Initialize the vop->win array elements.
1546 */
1547static void vop_win_init(struct vop *vop)
1548{
1549 const struct vop_data *vop_data = vop->data;
1550 unsigned int i;
1551
1552 for (i = 0; i < vop_data->win_size; i++) {
1553 struct vop_win *vop_win = &vop->win[i];
1554 const struct vop_win_data *win_data = &vop_data->win[i];
1555
1556 vop_win->data = win_data;
1557 vop_win->vop = vop;
Mark Yao2048e322014-08-22 18:36:26 +08001558 }
1559}
1560
Yakir Yang69c34e42016-07-24 14:57:40 +08001561/**
 1562 * rockchip_drm_wait_line_flag - acquire the given line flag event
 1563 * @crtc: CRTC on which to enable the line flag interrupt
 1564 * @line_num: line number of interest
 1565 * @mstimeout: timeout in milliseconds
1566 *
 1567 * The driver blocks here until the requested line flag interrupt has
 1568 * fired or the wait times out.
1569 *
1570 * Returns:
1571 * Zero on success, negative errno on failure.
1572 */
1573int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
1574 unsigned int mstimeout)
1575{
1576 struct vop *vop = to_vop(crtc);
1577 unsigned long jiffies_left;
1578
1579 if (!crtc || !vop->is_enabled)
1580 return -ENODEV;
1581
1582 if (line_num > crtc->mode.vtotal || mstimeout <= 0)
1583 return -EINVAL;
1584
1585 if (vop_line_flag_irq_is_enabled(vop))
1586 return -EBUSY;
1587
1588 reinit_completion(&vop->line_flag_completion);
1589 vop_line_flag_irq_enable(vop, line_num);
1590
1591 jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
1592 msecs_to_jiffies(mstimeout));
1593 vop_line_flag_irq_disable(vop);
1594
1595 if (jiffies_left == 0) {
1596 dev_err(vop->dev, "Timeout waiting for IRQ\n");
1597 return -ETIMEDOUT;
1598 }
1599
1600 return 0;
1601}
1602EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
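
/*
 * Illustrative usage sketch (an assumption, not code from this file): a
 * caller such as a PSR-capable encoder driver could wait for the scanout
 * to pass the last active line before touching self-refresh state. The
 * helper name and the 100 ms timeout are made up for the example.
 */
static int __maybe_unused example_wait_vact_end(struct drm_crtc *crtc)
{
	/* Block until the VOP has scanned past the last visible line. */
	return rockchip_drm_wait_line_flag(crtc, crtc->mode.vdisplay, 100);
}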
1603
Mark Yao2048e322014-08-22 18:36:26 +08001604static int vop_bind(struct device *dev, struct device *master, void *data)
1605{
1606 struct platform_device *pdev = to_platform_device(dev);
Mark Yao2048e322014-08-22 18:36:26 +08001607 const struct vop_data *vop_data;
1608 struct drm_device *drm_dev = data;
1609 struct vop *vop;
1610 struct resource *res;
1611 size_t alloc_size;
Heiko Stuebner3ea68922015-04-20 01:00:53 +02001612 int ret, irq;
Mark Yao2048e322014-08-22 18:36:26 +08001613
Mark Yaoa67719d2015-12-15 08:58:26 +08001614 vop_data = of_device_get_match_data(dev);
Mark Yao2048e322014-08-22 18:36:26 +08001615 if (!vop_data)
1616 return -ENODEV;
1617
1618 /* Allocate vop struct and its vop_win array */
1619 alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
1620 vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
1621 if (!vop)
1622 return -ENOMEM;
1623
1624 vop->dev = dev;
1625 vop->data = vop_data;
1626 vop->drm_dev = drm_dev;
1627 dev_set_drvdata(dev, vop);
1628
1629 vop_win_init(vop);
1630
1631 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1632 vop->len = resource_size(res);
1633 vop->regs = devm_ioremap_resource(dev, res);
1634 if (IS_ERR(vop->regs))
1635 return PTR_ERR(vop->regs);
1636
1637 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
1638 if (!vop->regsbak)
1639 return -ENOMEM;
1640
1641 ret = vop_initial(vop);
1642 if (ret < 0) {
 1643		dev_err(&pdev->dev, "cannot initialize vop device - err %d\n", ret);
1644 return ret;
1645 }
1646
Heiko Stuebner3ea68922015-04-20 01:00:53 +02001647 irq = platform_get_irq(pdev, 0);
1648 if (irq < 0) {
Mark Yao2048e322014-08-22 18:36:26 +08001649 dev_err(dev, "cannot find irq for vop\n");
Heiko Stuebner3ea68922015-04-20 01:00:53 +02001650 return irq;
Mark Yao2048e322014-08-22 18:36:26 +08001651 }
Heiko Stuebner3ea68922015-04-20 01:00:53 +02001652 vop->irq = (unsigned int)irq;
Mark Yao2048e322014-08-22 18:36:26 +08001653
1654 spin_lock_init(&vop->reg_lock);
1655 spin_lock_init(&vop->irq_lock);
1656
1657 mutex_init(&vop->vsync_mutex);
1658
Mark Yao63ebb9f2015-11-30 18:22:42 +08001659 ret = devm_request_irq(dev, vop->irq, vop_isr,
1660 IRQF_SHARED, dev_name(dev), vop);
Mark Yao2048e322014-08-22 18:36:26 +08001661 if (ret)
1662 return ret;
1663
1664 /* IRQ is initially disabled; it gets enabled in power_on */
1665 disable_irq(vop->irq);
1666
1667 ret = vop_create_crtc(vop);
1668 if (ret)
1669 return ret;
1670
1671 pm_runtime_enable(&pdev->dev);
Yakir Yang5182c1a2016-07-24 14:57:44 +08001672
Mark Yao2048e322014-08-22 18:36:26 +08001673 return 0;
1674}
1675
1676static void vop_unbind(struct device *dev, struct device *master, void *data)
1677{
1678 struct vop *vop = dev_get_drvdata(dev);
1679
1680 pm_runtime_disable(dev);
1681 vop_destroy_crtc(vop);
1682}
1683
Mark Yaoa67719d2015-12-15 08:58:26 +08001684const struct component_ops vop_component_ops = {
Mark Yao2048e322014-08-22 18:36:26 +08001685 .bind = vop_bind,
1686 .unbind = vop_unbind,
1687};
Stephen Rothwell54255e82015-12-31 13:40:11 +11001688EXPORT_SYMBOL_GPL(vop_component_ops);
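
/*
 * Illustrative sketch (an assumption, not code from this file): the per-SoC
 * platform driver is the piece expected to hand vop_component_ops to the
 * component framework from its probe/remove callbacks, roughly as below.
 * The function names are hypothetical.
 */
static int __maybe_unused example_vop_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vop_component_ops);
}

static int __maybe_unused example_vop_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vop_component_ops);

	return 0;
}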