blob: 06e8240d5ff8ec86c29378440f7d09f90759741e [file] [log] [blame]
Rob Clark16ea9752013-01-08 15:04:28 -06001/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Sean Paulce2f2c32016-09-21 06:14:53 -070018#include <drm/drm_atomic.h>
Jyri Sarha305198d2016-04-07 15:05:16 +030019#include <drm/drm_atomic_helper.h>
Sean Paulce2f2c32016-09-21 06:14:53 -070020#include <drm/drm_crtc.h>
21#include <drm/drm_flip_work.h>
22#include <drm/drm_plane_helper.h>
Jyri Sarha4e910c72016-09-06 22:55:33 +030023#include <linux/workqueue.h>
Rob Clark16ea9752013-01-08 15:04:28 -060024
25#include "tilcdc_drv.h"
26#include "tilcdc_regs.h"
27
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +020028#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
29
/*
 * Per-CRTC driver state, embedding the DRM CRTC object.
 * Recover the wrapper from a struct drm_crtc with to_tilcdc_crtc().
 */
struct tilcdc_crtc {
	struct drm_crtc base;

	/* the single primary plane driven by this CRTC */
	struct drm_plane primary;
	/* panel timings/config, set via tilcdc_crtc_set_panel_info() */
	const struct tilcdc_panel_info *info;
	/* pending page-flip event, delivered from the vblank interrupt */
	struct drm_pending_vblank_event *event;
	/* guards enable/disable against redundant re-entry */
	bool enabled;
	/* woken by the LCDC_FRAME_DONE irq (rev 2 only) */
	wait_queue_head_t frame_done_wq;
	bool frame_done;
	/* protects curr_fb/next_fb/last_vblank against the irq handler */
	spinlock_t irq_lock;

	/* cached LCD functional clock rate, set by tilcdc_crtc_set_clk() */
	unsigned int lcd_fck_rate;

	/* timestamp of the last vblank, used to time page-flip latching */
	ktime_t last_vblank;

	/* framebuffer currently being scanned out */
	struct drm_framebuffer *curr_fb;
	/* framebuffer to be latched at the next vblank */
	struct drm_framebuffer *next_fb;

	/* for deferred fb unref's: */
	struct drm_flip_work unref_work;

	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;

	/* consecutive SYNC_LOST irqs; used for flood detection in the irq */
	int sync_lost_count;
	bool frame_intact;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
58
Rob Clarka464d612013-08-07 13:41:20 -040059static void unref_worker(struct drm_flip_work *work, void *val)
Rob Clark16ea9752013-01-08 15:04:28 -060060{
Darren Etheridgef7b45752013-06-21 13:52:26 -050061 struct tilcdc_crtc *tilcdc_crtc =
Rob Clarka464d612013-08-07 13:41:20 -040062 container_of(work, struct tilcdc_crtc, unref_work);
Rob Clark16ea9752013-01-08 15:04:28 -060063 struct drm_device *dev = tilcdc_crtc->base.dev;
Rob Clark16ea9752013-01-08 15:04:28 -060064
65 mutex_lock(&dev->mode_config.mutex);
Rob Clarka464d612013-08-07 13:41:20 -040066 drm_framebuffer_unreference(val);
Rob Clark16ea9752013-01-08 15:04:28 -060067 mutex_unlock(&dev->mode_config.mutex);
68}
69
/*
 * Point the LCDC DMA engine at @fb and queue the previously scanned-out
 * framebuffer for a deferred unreference.
 *
 * Called with tilcdc_crtc->irq_lock held (from tilcdc_crtc_update_fb() and
 * the END_OF_FRAME0 irq path) -- NOTE(review): inferred from callers in
 * this file; confirm before relying on it.
 */
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_gem_cma_object *gem;
	dma_addr_t start, end;
	u64 dma_base_and_ceiling;

	gem = drm_fb_cma_get_gem_obj(fb, 0);

	/* DMA base: fb origin plus the (x, y) panning offset in bytes */
	start = gem->paddr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);

	end = start + (crtc->mode.vdisplay * fb->pitches[0]);

	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
	 * with a single instruction, if available. This should make it more
	 * unlikely that LCDC would fetch the DMA addresses in the middle of
	 * an update.
	 */
	if (priv->rev == 1)
		/* presumably rev 1 treats the ceiling address as inclusive,
		 * hence the one-byte adjustment -- TODO confirm in the TRM */
		end -= 1;

	dma_base_and_ceiling = (u64)end << 32 | start;
	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);

	/* the old fb keeps its reference until the flip work runs */
	if (tilcdc_crtc->curr_fb)
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
			tilcdc_crtc->curr_fb);

	tilcdc_crtc->curr_fb = fb;
}
104
/*
 * Clear any stale interrupt status and enable the interrupts the driver
 * handles. Rev 1 and rev 2 LCDC use different enable registers/bits.
 */
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_UNDERFLOW_INT_ENA);
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		/* rev 2 additionally has FRAME_DONE and SYNC_LOST irqs */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
123
/*
 * Mask all LCDC interrupts this driver may have enabled; counterpart of
 * tilcdc_crtc_enable_irqs().
 */
static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
141
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300142static void reset(struct drm_crtc *crtc)
Rob Clark16ea9752013-01-08 15:04:28 -0600143{
144 struct drm_device *dev = crtc->dev;
145 struct tilcdc_drm_private *priv = dev->dev_private;
146
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300147 if (priv->rev != 2)
148 return;
149
150 tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
151 usleep_range(250, 1000);
152 tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
153}
154
/*
 * Power up and start the raster engine. Idempotent: returns early if the
 * CRTC is already enabled. Caller must hold the crtc modeset lock.
 */
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	if (tilcdc_crtc->enabled)
		return;

	pm_runtime_get_sync(dev->dev);

	/* reset before touching raster/irq state (rev 2 only, see reset()) */
	reset(crtc);

	tilcdc_crtc_enable_irqs(dev);

	/* single-buffer DMA; flips are done by rewriting the base address */
	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;
}
179
/*
 * Stop the raster engine and power the controller down, releasing any
 * framebuffers still held for scanout. Idempotent; caller must hold the
 * crtc modeset lock.
 */
void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	if (!tilcdc_crtc->enabled)
		return;

	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * if necessary wait for framedone irq which will still come
	 * before putting things to sleep..
	 */
	if (priv->rev == 2) {
		int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
					     tilcdc_crtc->frame_done,
					     msecs_to_jiffies(500));
		if (ret == 0)
			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
				__func__);
	}

	drm_crtc_vblank_off(crtc);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	/* drop references to any fbs still queued for scanout */
	if (tilcdc_crtc->next_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->next_fb);
		tilcdc_crtc->next_fb = NULL;
	}

	if (tilcdc_crtc->curr_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->curr_fb);
		tilcdc_crtc->curr_fb = NULL;
	}

	/* run the deferred unrefs now; the irq that normally commits them
	 * is disabled at this point */
	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
	tilcdc_crtc->last_vblank = ktime_set(0, 0);

	tilcdc_crtc->enabled = false;
}
230
231static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
232{
233 return crtc->state && crtc->state->enable && crtc->state->active;
Rob Clark16ea9752013-01-08 15:04:28 -0600234}
235
/*
 * Tear down the CRTC: make sure it is disabled, drain the deferred-unref
 * workqueue, and release DRM/OF resources.
 */
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;

	/* tilcdc_crtc_disable() asserts the crtc lock is held */
	drm_modeset_lock_crtc(crtc, NULL);
	tilcdc_crtc_disable(crtc);
	drm_modeset_unlock_crtc(crtc);

	/* ensure all queued unref work has finished before cleanup */
	flush_workqueue(priv->wq);

	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}
251
/*
 * Queue a page flip to @fb, optionally delivering @event at completion.
 *
 * If the next vblank is closer than TILCDC_VBLANK_SAFETY_THRESHOLD_US the
 * new scanout address is not programmed immediately (the hardware might
 * latch a half-written address pair); instead the fb is parked in next_fb
 * and programmed from the END_OF_FRAME0 irq.
 *
 * Returns 0 on success, -EBUSY if a flip is already pending. Caller must
 * hold the crtc modeset lock.
 */
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}

	/* reference dropped via unref_work once the fb leaves scanout */
	drm_framebuffer_reference(fb);

	crtc->primary->fb = fb;

	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

	/* last_vblank is zero until the first vblank after enable */
	if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
		ktime_t next_vblank;
		s64 tdiff;

		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
			1000000 / crtc->hwmode.vrefresh);

		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
	}

	if (tilcdc_crtc->next_fb != fb)
		set_scanout(crtc, fb);

	tilcdc_crtc->event = event;

	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	return 0;
}
295
Rob Clark16ea9752013-01-08 15:04:28 -0600296static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
297 const struct drm_display_mode *mode,
298 struct drm_display_mode *adjusted_mode)
299{
Jyri Sarha103cd8b2015-02-10 14:13:23 +0200300 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
301
302 if (!tilcdc_crtc->simulate_vesa_sync)
303 return true;
304
305 /*
306 * tilcdc does not generate VESA-compliant sync but aligns
307 * VS on the second edge of HS instead of first edge.
308 * We use adjusted_mode, to fixup sync by aligning both rising
309 * edges and add HSKEW offset to fix the sync.
310 */
311 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
312 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
313
314 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
315 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
316 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
317 } else {
318 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
319 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
320 }
321
Rob Clark16ea9752013-01-08 15:04:28 -0600322 return true;
323}
324
/*
 * Calculate the percentage difference between the requested pixel clock
 * rate and the effective rate resulting from calculating the clock
 * divider value. Both rates are scaled down by 100 first so the
 * intermediate multiplication cannot overflow for realistic pixel clocks.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
				     unsigned long real_rate)
{
	int req = rate / 100;
	int real = real_rate / 100;
	int delta = ((real - req) * 100) / req;

	return (unsigned int)abs(delta);
}
336
/*
 * Program the LCD pixel clock for the current mode: first try to set the
 * functional clock to mode-clock * 2 with a fixed divider of 2; if the
 * platform cannot set the rate, fall back to recomputing the divider from
 * whatever rate the clock actually runs at.
 */
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_rate, req_rate;
	unsigned int clkdiv;
	int ret;

	clkdiv = 2; /* first try using a standard divider of 2 */

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	req_rate = crtc->mode.clock * 1000;

	ret = clk_set_rate(priv->clk, req_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	if (ret < 0) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			dev_err(dev->dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}

		clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);

		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_rate = clkdiv * req_rate;

		if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
			dev_warn(dev->dev,
				 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
				 clk_rate, real_rate);
		}
	}

	/* cache the rate so tilcdc_crtc_update_clk() can detect changes */
	tilcdc_crtc->lcd_fck_rate = clk_rate;

	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);

	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}
400
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300401static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
402{
403 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
404 struct drm_device *dev = crtc->dev;
405 struct tilcdc_drm_private *priv = dev->dev_private;
406 const struct tilcdc_panel_info *info = tilcdc_crtc->info;
407 uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
408 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
409 struct drm_framebuffer *fb = crtc->primary->state->fb;
410
Jyri Sarha2e0965b2016-09-06 17:25:08 +0300411 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
412
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300413 if (WARN_ON(!info))
414 return;
415
416 if (WARN_ON(!fb))
417 return;
418
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300419 /* Configure the Burst Size and fifo threshold of DMA: */
420 reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
421 switch (info->dma_burst_sz) {
422 case 1:
423 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
424 break;
425 case 2:
426 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
427 break;
428 case 4:
429 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
430 break;
431 case 8:
432 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
433 break;
434 case 16:
435 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
436 break;
437 default:
438 dev_err(dev->dev, "invalid burst size\n");
439 return;
440 }
441 reg |= (info->fifo_th << 8);
442 tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
443
444 /* Configure timings: */
445 hbp = mode->htotal - mode->hsync_end;
446 hfp = mode->hsync_start - mode->hdisplay;
447 hsw = mode->hsync_end - mode->hsync_start;
448 vbp = mode->vtotal - mode->vsync_end;
449 vfp = mode->vsync_start - mode->vdisplay;
450 vsw = mode->vsync_end - mode->vsync_start;
451
452 DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
453 mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
454
455 /* Set AC Bias Period and Number of Transitions per Interrupt: */
456 reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
457 reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
458 LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
459
460 /*
461 * subtract one from hfp, hbp, hsw because the hardware uses
462 * a value of 0 as 1
463 */
464 if (priv->rev == 2) {
465 /* clear bits we're going to set */
466 reg &= ~0x78000033;
467 reg |= ((hfp-1) & 0x300) >> 8;
468 reg |= ((hbp-1) & 0x300) >> 4;
469 reg |= ((hsw-1) & 0x3c0) << 21;
470 }
471 tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
472
473 reg = (((mode->hdisplay >> 4) - 1) << 4) |
474 (((hbp-1) & 0xff) << 24) |
475 (((hfp-1) & 0xff) << 16) |
476 (((hsw-1) & 0x3f) << 10);
477 if (priv->rev == 2)
478 reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
479 tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
480
481 reg = ((mode->vdisplay - 1) & 0x3ff) |
482 ((vbp & 0xff) << 24) |
483 ((vfp & 0xff) << 16) |
484 (((vsw-1) & 0x3f) << 10);
485 tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
486
487 /*
488 * be sure to set Bit 10 for the V2 LCDC controller,
489 * otherwise limited to 1024 pixels width, stopping
490 * 1920x1080 being supported.
491 */
492 if (priv->rev == 2) {
493 if ((mode->vdisplay - 1) & 0x400) {
494 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
495 LCDC_LPP_B10);
496 } else {
497 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
498 LCDC_LPP_B10);
499 }
500 }
501
502 /* Configure display type: */
503 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
504 ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
505 LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
506 0x000ff000 /* Palette Loading Delay bits */);
507 reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
508 if (info->tft_alt_mode)
509 reg |= LCDC_TFT_ALT_ENABLE;
510 if (priv->rev == 2) {
Laurent Pinchart59f11a42016-10-18 01:41:14 +0300511 switch (fb->pixel_format) {
512 case DRM_FORMAT_BGR565:
513 case DRM_FORMAT_RGB565:
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300514 break;
Laurent Pinchart59f11a42016-10-18 01:41:14 +0300515 case DRM_FORMAT_XBGR8888:
516 case DRM_FORMAT_XRGB8888:
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300517 reg |= LCDC_V2_TFT_24BPP_UNPACK;
518 /* fallthrough */
Laurent Pinchart59f11a42016-10-18 01:41:14 +0300519 case DRM_FORMAT_BGR888:
520 case DRM_FORMAT_RGB888:
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300521 reg |= LCDC_V2_TFT_24BPP_MODE;
522 break;
523 default:
524 dev_err(dev->dev, "invalid pixel format\n");
525 return;
526 }
527 }
528 reg |= info->fdd < 12;
529 tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
530
531 if (info->invert_pxl_clk)
532 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
533 else
534 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
535
536 if (info->sync_ctrl)
537 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
538 else
539 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
540
541 if (info->sync_edge)
542 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
543 else
544 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
545
546 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
547 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
548 else
549 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
550
551 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
552 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
553 else
554 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
555
556 if (info->raster_order)
557 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
558 else
559 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
560
561 drm_framebuffer_reference(fb);
562
563 set_scanout(crtc, fb);
564
Jyri Sarha642e5162016-09-06 16:19:54 +0300565 tilcdc_crtc_set_clk(crtc);
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300566
Jyri Sarhaf6382f12016-04-07 15:09:50 +0300567 crtc->hwmode = crtc->state->adjusted_mode;
568}
569
Jyri Sarhadb380c52016-04-07 15:10:23 +0300570static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
571 struct drm_crtc_state *state)
572{
573 struct drm_display_mode *mode = &state->mode;
574 int ret;
575
576 /* If we are not active we don't care */
577 if (!state->active)
578 return 0;
579
580 if (state->state->planes[0].ptr != crtc->primary ||
581 state->state->planes[0].state == NULL ||
582 state->state->planes[0].state->crtc != crtc) {
583 dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
584 return -EINVAL;
585 }
586
587 ret = tilcdc_crtc_mode_valid(crtc, mode);
588 if (ret) {
589 dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
590 return -EINVAL;
591 }
592
593 return 0;
594}
595
/* CRTC core callbacks; atomic state management uses the generic helpers. */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy        = tilcdc_crtc_destroy,
	.set_config     = drm_atomic_helper_set_config,
	.page_flip      = drm_atomic_helper_page_flip,
	.reset          = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
604
/* CRTC helper callbacks used by the atomic modeset helpers. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup     = tilcdc_crtc_mode_fixup,
	.enable         = tilcdc_crtc_enable,
	.disable        = tilcdc_crtc_disable,
	.atomic_check   = tilcdc_crtc_atomic_check,
	.mode_set_nofb  = tilcdc_crtc_mode_set_nofb,
};
612
613int tilcdc_crtc_max_width(struct drm_crtc *crtc)
614{
615 struct drm_device *dev = crtc->dev;
616 struct tilcdc_drm_private *priv = dev->dev_private;
617 int max_width = 0;
618
619 if (priv->rev == 1)
620 max_width = 1024;
621 else if (priv->rev == 2)
622 max_width = 2048;
623
624 return max_width;
625}
626
/*
 * Check whether @mode fits the LCDC's timing-register fields and the
 * platform limits (max pixel clock, max width, memory bandwidth) read
 * from the device tree. Returns MODE_OK or a specific rejection code.
 */
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
		return MODE_VIRTUAL_X;

	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;

	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;

	DBG("Processing mode %dx%d@%d with pixel clock %d",
	    mode->hdisplay, mode->vdisplay,
	    drm_mode_vrefresh(mode), mode->clock);

	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	/* the h* fields are programmed minus one, hence the (x-1) checks
	 * against the 10-bit register field widths */
	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}

	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}

	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}

	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;

	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}

	return MODE_OK;
}
714
715void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
716 const struct tilcdc_panel_info *info)
717{
718 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
719 tilcdc_crtc->info = info;
720}
721
Jyri Sarha103cd8b2015-02-10 14:13:23 +0200722void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
723 bool simulate_vesa_sync)
724{
725 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
726
727 tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
728}
729
/*
 * React to an external change of the functional clock rate: if the rate
 * no longer matches what was programmed, restart the CRTC around a
 * reprogramming of the pixel clock divider. The extra pm_runtime
 * get/put pair keeps the controller powered across the disable/enable
 * cycle.
 */
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	drm_modeset_lock_crtc(crtc, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			pm_runtime_get_sync(dev->dev);
			tilcdc_crtc_disable(crtc);

			tilcdc_crtc_set_clk(crtc);

			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock_crtc(crtc);
}
750
/* consecutive SYNC_LOST irqs tolerated before the irq is masked */
#define SYNC_LOST_COUNT_LIMIT 50

/*
 * LCDC interrupt handler. On END_OF_FRAME0: commit deferred fb unrefs,
 * latch a parked next_fb into scanout, signal the vblank, and deliver a
 * pending page-flip event (unless the flip only just got latched, in
 * which case the event goes out on the following frame). Also handles
 * FIFO underflow reporting and, on rev 2, FRAME_DONE wakeups and
 * SYNC_LOST flood protection.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		/* a flip parked too close to vblank is programmed now;
		 * its completion event is sent on the next frame */
		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		/* one clean frame resets the sync-lost flood counter */
		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
				    __func__, stat);

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* raster stop completed; wakes tilcdc_crtc_disable() */
		if (stat & LCDC_FRAME_DONE) {
			tilcdc_crtc->frame_done = true;
			wake_up(&tilcdc_crtc->frame_done_wq);
		}

		if (stat & LCDC_SYNC_LOST) {
			dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
					    __func__, stat);
			tilcdc_crtc->frame_intact = false;
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
			}
		}

		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}
836
Rob Clark16ea9752013-01-08 15:04:28 -0600837struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
838{
Jyri Sarhad66284fb2015-05-27 11:58:37 +0300839 struct tilcdc_drm_private *priv = dev->dev_private;
Rob Clark16ea9752013-01-08 15:04:28 -0600840 struct tilcdc_crtc *tilcdc_crtc;
841 struct drm_crtc *crtc;
842 int ret;
843
Jyri Sarhad0ec32c2016-02-23 12:44:27 +0200844 tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
Rob Clark16ea9752013-01-08 15:04:28 -0600845 if (!tilcdc_crtc) {
846 dev_err(dev->dev, "allocation failed\n");
847 return NULL;
848 }
849
850 crtc = &tilcdc_crtc->base;
851
Jyri Sarha47f571c2016-04-07 15:04:18 +0300852 ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
853 if (ret < 0)
854 goto fail;
855
Rob Clark16ea9752013-01-08 15:04:28 -0600856 init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
857
Boris BREZILLONd7f8db52014-11-14 19:30:30 +0100858 drm_flip_work_init(&tilcdc_crtc->unref_work,
Rob Clarka464d612013-08-07 13:41:20 -0400859 "unref", unref_worker);
Rob Clark16ea9752013-01-08 15:04:28 -0600860
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +0200861 spin_lock_init(&tilcdc_crtc->irq_lock);
862
Jyri Sarha47f571c2016-04-07 15:04:18 +0300863 ret = drm_crtc_init_with_planes(dev, crtc,
864 &tilcdc_crtc->primary,
865 NULL,
866 &tilcdc_crtc_funcs,
867 "tilcdc crtc");
Rob Clark16ea9752013-01-08 15:04:28 -0600868 if (ret < 0)
869 goto fail;
870
871 drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
872
Jyri Sarhad66284fb2015-05-27 11:58:37 +0300873 if (priv->is_componentized) {
874 struct device_node *ports =
875 of_get_child_by_name(dev->dev->of_node, "ports");
876
877 if (ports) {
878 crtc->port = of_get_child_by_name(ports, "port");
879 of_node_put(ports);
880 } else {
881 crtc->port =
882 of_get_child_by_name(dev->dev->of_node, "port");
883 }
884 if (!crtc->port) { /* This should never happen */
885 dev_err(dev->dev, "Port node not found in %s\n",
886 dev->dev->of_node->full_name);
887 goto fail;
888 }
889 }
890
Rob Clark16ea9752013-01-08 15:04:28 -0600891 return crtc;
892
893fail:
894 tilcdc_crtc_destroy(crtc);
895 return NULL;
896}