blob: 6ef4d1a1e3a9944c9bff444a014a9f90a5825386 [file] [log] [blame]
Rob Clark16ea9752013-01-08 15:04:28 -06001/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Sean Paulce2f2c32016-09-21 06:14:53 -070018#include <drm/drm_atomic.h>
Jyri Sarha305198d2016-04-07 15:05:16 +030019#include <drm/drm_atomic_helper.h>
Sean Paulce2f2c32016-09-21 06:14:53 -070020#include <drm/drm_crtc.h>
21#include <drm/drm_flip_work.h>
22#include <drm/drm_plane_helper.h>
Jyri Sarha4e910c72016-09-06 22:55:33 +030023#include <linux/workqueue.h>
Bartosz Golaszewski93452352016-10-31 15:19:26 +010024#include <linux/completion.h>
25#include <linux/dma-mapping.h>
Rob Herring86418f92017-03-22 08:26:06 -050026#include <linux/of_graph.h>
Jyri Sarhace99f722017-10-12 12:19:46 +030027#include <linux/math64.h>
Rob Clark16ea9752013-01-08 15:04:28 -060028
29#include "tilcdc_drv.h"
30#include "tilcdc_regs.h"
31
Bartosz Golaszewski93452352016-10-31 15:19:26 +010032#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
Jyri Sarha55e165c2016-11-15 23:37:24 +020033#define TILCDC_PALETTE_SIZE 32
34#define TILCDC_PALETTE_FIRST_ENTRY 0x4000
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +020035
Rob Clark16ea9752013-01-08 15:04:28 -060036struct tilcdc_crtc {
37 struct drm_crtc base;
38
Jyri Sarha47f571c2016-04-07 15:04:18 +030039 struct drm_plane primary;
Rob Clark16ea9752013-01-08 15:04:28 -060040 const struct tilcdc_panel_info *info;
Rob Clark16ea9752013-01-08 15:04:28 -060041 struct drm_pending_vblank_event *event;
Jyri Sarha2d53a182016-10-25 12:27:31 +030042 struct mutex enable_lock;
Jyri Sarha47bfd6c2016-06-22 16:27:54 +030043 bool enabled;
Jyri Sarha2d53a182016-10-25 12:27:31 +030044 bool shutdown;
Rob Clark16ea9752013-01-08 15:04:28 -060045 wait_queue_head_t frame_done_wq;
46 bool frame_done;
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +020047 spinlock_t irq_lock;
48
Jyri Sarha642e5162016-09-06 16:19:54 +030049 unsigned int lcd_fck_rate;
50
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +020051 ktime_t last_vblank;
Jyri Sarhace99f722017-10-12 12:19:46 +030052 unsigned int hvtotal_us;
Rob Clark16ea9752013-01-08 15:04:28 -060053
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +030054 struct drm_framebuffer *curr_fb;
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +020055 struct drm_framebuffer *next_fb;
Rob Clark16ea9752013-01-08 15:04:28 -060056
57 /* for deferred fb unref's: */
Rob Clarka464d612013-08-07 13:41:20 -040058 struct drm_flip_work unref_work;
Jyri Sarha103cd8b2015-02-10 14:13:23 +020059
60 /* Only set if an external encoder is connected */
61 bool simulate_vesa_sync;
Jyri Sarha5895d082016-01-08 14:33:09 +020062
63 int sync_lost_count;
64 bool frame_intact;
Jyri Sarha13b3d722016-04-06 14:02:38 +030065 struct work_struct recover_work;
Bartosz Golaszewski93452352016-10-31 15:19:26 +010066
67 dma_addr_t palette_dma_handle;
Jyri Sarha55e165c2016-11-15 23:37:24 +020068 u16 *palette_base;
Bartosz Golaszewski93452352016-10-31 15:19:26 +010069 struct completion palette_loaded;
Rob Clark16ea9752013-01-08 15:04:28 -060070};
71#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
72
Rob Clarka464d612013-08-07 13:41:20 -040073static void unref_worker(struct drm_flip_work *work, void *val)
Rob Clark16ea9752013-01-08 15:04:28 -060074{
Darren Etheridgef7b45752013-06-21 13:52:26 -050075 struct tilcdc_crtc *tilcdc_crtc =
Rob Clarka464d612013-08-07 13:41:20 -040076 container_of(work, struct tilcdc_crtc, unref_work);
Rob Clark16ea9752013-01-08 15:04:28 -060077 struct drm_device *dev = tilcdc_crtc->base.dev;
Rob Clark16ea9752013-01-08 15:04:28 -060078
79 mutex_lock(&dev->mode_config.mutex);
Cihangir Akturk61dd13b2017-08-03 14:58:38 +030080 drm_framebuffer_put(val);
Rob Clark16ea9752013-01-08 15:04:28 -060081 mutex_unlock(&dev->mode_config.mutex);
82}
83
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +030084static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
Rob Clark16ea9752013-01-08 15:04:28 -060085{
86 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
87 struct drm_device *dev = crtc->dev;
Daniel Schultz4c268d62016-10-28 13:52:41 +020088 struct tilcdc_drm_private *priv = dev->dev_private;
Rob Clark16ea9752013-01-08 15:04:28 -060089 struct drm_gem_cma_object *gem;
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +030090 dma_addr_t start, end;
Jyri Sarha7eb9f062016-08-26 15:10:14 +030091 u64 dma_base_and_ceiling;
Rob Clark16ea9752013-01-08 15:04:28 -060092
Rob Clark16ea9752013-01-08 15:04:28 -060093 gem = drm_fb_cma_get_gem_obj(fb, 0);
94
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +030095 start = gem->paddr + fb->offsets[0] +
96 crtc->y * fb->pitches[0] +
Ville Syrjälä353c8592016-12-14 23:30:57 +020097 crtc->x * fb->format->cpp[0];
Rob Clark16ea9752013-01-08 15:04:28 -060098
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +030099 end = start + (crtc->mode.vdisplay * fb->pitches[0]);
Rob Clark16ea9752013-01-08 15:04:28 -0600100
Jyri Sarha7eb9f062016-08-26 15:10:14 +0300101 /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
102 * with a single insruction, if available. This should make it more
103 * unlikely that LCDC would fetch the DMA addresses in the middle of
104 * an update.
105 */
Daniel Schultz4c268d62016-10-28 13:52:41 +0200106 if (priv->rev == 1)
107 end -= 1;
108
109 dma_base_and_ceiling = (u64)end << 32 | start;
Jyri Sarha7eb9f062016-08-26 15:10:14 +0300110 tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +0300111
112 if (tilcdc_crtc->curr_fb)
113 drm_flip_work_queue(&tilcdc_crtc->unref_work,
114 tilcdc_crtc->curr_fb);
115
116 tilcdc_crtc->curr_fb = fb;
Rob Clark16ea9752013-01-08 15:04:28 -0600117}
118
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100119/*
Jyri Sarha55e165c2016-11-15 23:37:24 +0200120 * The driver currently only supports only true color formats. For
121 * true color the palette block is bypassed, but a 32 byte palette
122 * should still be loaded. The first 16-bit entry must be 0x4000 while
123 * all other entries must be zeroed.
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100124 */
125static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
126{
Jyri Sarha55e165c2016-11-15 23:37:24 +0200127 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
128 struct drm_device *dev = crtc->dev;
129 struct tilcdc_drm_private *priv = dev->dev_private;
Jyri Sarhae59f5af2016-11-17 18:46:16 +0200130 int ret;
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100131
Jyri Sarha274c34d2016-11-15 23:57:42 +0200132 reinit_completion(&tilcdc_crtc->palette_loaded);
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100133
134 /* Tell the LCDC where the palette is located. */
135 tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
136 tilcdc_crtc->palette_dma_handle);
137 tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
Jyri Sarha55e165c2016-11-15 23:37:24 +0200138 (u32) tilcdc_crtc->palette_dma_handle +
139 TILCDC_PALETTE_SIZE - 1);
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100140
Jyri Sarha55e165c2016-11-15 23:37:24 +0200141 /* Set dma load mode for palette loading only. */
142 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
143 LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
144 LCDC_PALETTE_LOAD_MODE_MASK);
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100145
Jyri Sarha55e165c2016-11-15 23:37:24 +0200146 /* Enable DMA Palette Loaded Interrupt */
147 if (priv->rev == 1)
148 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
149 else
150 tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);
151
152 /* Enable LCDC DMA and wait for palette to be loaded. */
153 tilcdc_clear_irqstatus(dev, 0xffffffff);
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100154 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
155
Jyri Sarhae59f5af2016-11-17 18:46:16 +0200156 ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
157 msecs_to_jiffies(50));
158 if (ret == 0)
159 dev_err(dev->dev, "%s: Palette loading timeout", __func__);
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100160
Jyri Sarha55e165c2016-11-15 23:37:24 +0200161 /* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100162 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
Jyri Sarha55e165c2016-11-15 23:37:24 +0200163 if (priv->rev == 1)
164 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
165 else
166 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
Bartosz Golaszewski93452352016-10-31 15:19:26 +0100167}
168
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300169static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
170{
171 struct tilcdc_drm_private *priv = dev->dev_private;
172
173 tilcdc_clear_irqstatus(dev, 0xffffffff);
174
175 if (priv->rev == 1) {
176 tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
Jyri Sarha36725832016-11-21 18:30:19 +0200177 LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300178 LCDC_V1_UNDERFLOW_INT_ENA);
Karl Beldan8d6c3f72016-08-23 12:57:00 +0000179 tilcdc_set(dev, LCDC_DMA_CTRL_REG,
180 LCDC_V1_END_OF_FRAME_INT_ENA);
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300181 } else {
182 tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
183 LCDC_V2_UNDERFLOW_INT_ENA |
184 LCDC_V2_END_OF_FRAME0_INT_ENA |
185 LCDC_FRAME_DONE | LCDC_SYNC_LOST);
186 }
187}
188
189static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
190{
191 struct tilcdc_drm_private *priv = dev->dev_private;
192
193 /* disable irqs that we might have enabled: */
194 if (priv->rev == 1) {
195 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
Jyri Sarha36725832016-11-21 18:30:19 +0200196 LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300197 LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
198 tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
199 LCDC_V1_END_OF_FRAME_INT_ENA);
200 } else {
201 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
202 LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
203 LCDC_V2_END_OF_FRAME0_INT_ENA |
204 LCDC_FRAME_DONE | LCDC_SYNC_LOST);
205 }
206}
207
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300208static void reset(struct drm_crtc *crtc)
Rob Clark16ea9752013-01-08 15:04:28 -0600209{
210 struct drm_device *dev = crtc->dev;
211 struct tilcdc_drm_private *priv = dev->dev_private;
212
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300213 if (priv->rev != 2)
214 return;
215
216 tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
217 usleep_range(250, 1000);
218 tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
219}
220
/*
 * Calculate the percentage difference between the requested pixel clock rate
 * and the effective rate resulting from calculating the clock divider value.
 *
 * Returns UINT_MAX when @rate is below 100 Hz: the original code divided by
 * r == rate / 100 without a guard, which is undefined for such (bogus) rates.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
				     unsigned long real_rate)
{
	int r = rate / 100, rr = real_rate / 100;

	/* Guard against division by zero for rates below 100 Hz. */
	if (r == 0)
		return (unsigned int)-1;

	return (unsigned int)(abs(((rr - r) * 100) / r));
}
232
233static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
234{
235 struct drm_device *dev = crtc->dev;
236 struct tilcdc_drm_private *priv = dev->dev_private;
237 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
238 unsigned long clk_rate, real_rate, req_rate;
239 unsigned int clkdiv;
240 int ret;
241
242 clkdiv = 2; /* first try using a standard divider of 2 */
243
244 /* mode.clock is in KHz, set_rate wants parameter in Hz */
245 req_rate = crtc->mode.clock * 1000;
246
247 ret = clk_set_rate(priv->clk, req_rate * clkdiv);
248 clk_rate = clk_get_rate(priv->clk);
249 if (ret < 0) {
250 /*
251 * If we fail to set the clock rate (some architectures don't
252 * use the common clock framework yet and may not implement
253 * all the clk API calls for every clock), try the next best
254 * thing: adjusting the clock divider, unless clk_get_rate()
255 * failed as well.
256 */
257 if (!clk_rate) {
258 /* Nothing more we can do. Just bail out. */
259 dev_err(dev->dev,
260 "failed to set the pixel clock - unable to read current lcdc clock rate\n");
261 return;
262 }
263
264 clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);
265
266 /*
267 * Emit a warning if the real clock rate resulting from the
268 * calculated divider differs much from the requested rate.
269 *
270 * 5% is an arbitrary value - LCDs are usually quite tolerant
271 * about pixel clock rates.
272 */
273 real_rate = clkdiv * req_rate;
274
275 if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
276 dev_warn(dev->dev,
277 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
278 clk_rate, real_rate);
279 }
280 }
281
282 tilcdc_crtc->lcd_fck_rate = clk_rate;
283
284 DBG("lcd_clk=%u, mode clock=%d, div=%u",
285 tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
286
287 /* Configure the LCD clock divisor. */
288 tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
289 LCDC_RASTER_MODE);
290
291 if (priv->rev == 2)
292 tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
293 LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
294 LCDC_V2_CORE_CLK_EN);
295}
296
Jyri Sarhace99f722017-10-12 12:19:46 +0300297uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
298{
299 return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
300 mode->clock);
301}
302
Jyri Sarha75d7f272016-11-24 23:25:08 +0200303static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
304{
305 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
306 struct drm_device *dev = crtc->dev;
307 struct tilcdc_drm_private *priv = dev->dev_private;
308 const struct tilcdc_panel_info *info = tilcdc_crtc->info;
309 uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
310 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
311 struct drm_framebuffer *fb = crtc->primary->state->fb;
312
313 if (WARN_ON(!info))
314 return;
315
316 if (WARN_ON(!fb))
317 return;
318
319 /* Configure the Burst Size and fifo threshold of DMA: */
320 reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
321 switch (info->dma_burst_sz) {
322 case 1:
323 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
324 break;
325 case 2:
326 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
327 break;
328 case 4:
329 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
330 break;
331 case 8:
332 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
333 break;
334 case 16:
335 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
336 break;
337 default:
338 dev_err(dev->dev, "invalid burst size\n");
339 return;
340 }
341 reg |= (info->fifo_th << 8);
342 tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
343
344 /* Configure timings: */
345 hbp = mode->htotal - mode->hsync_end;
346 hfp = mode->hsync_start - mode->hdisplay;
347 hsw = mode->hsync_end - mode->hsync_start;
348 vbp = mode->vtotal - mode->vsync_end;
349 vfp = mode->vsync_start - mode->vdisplay;
350 vsw = mode->vsync_end - mode->vsync_start;
351
352 DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
353 mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
354
355 /* Set AC Bias Period and Number of Transitions per Interrupt: */
356 reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
357 reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
358 LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
359
360 /*
361 * subtract one from hfp, hbp, hsw because the hardware uses
362 * a value of 0 as 1
363 */
364 if (priv->rev == 2) {
365 /* clear bits we're going to set */
366 reg &= ~0x78000033;
367 reg |= ((hfp-1) & 0x300) >> 8;
368 reg |= ((hbp-1) & 0x300) >> 4;
369 reg |= ((hsw-1) & 0x3c0) << 21;
370 }
371 tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
372
373 reg = (((mode->hdisplay >> 4) - 1) << 4) |
374 (((hbp-1) & 0xff) << 24) |
375 (((hfp-1) & 0xff) << 16) |
376 (((hsw-1) & 0x3f) << 10);
377 if (priv->rev == 2)
378 reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
379 tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
380
381 reg = ((mode->vdisplay - 1) & 0x3ff) |
382 ((vbp & 0xff) << 24) |
383 ((vfp & 0xff) << 16) |
384 (((vsw-1) & 0x3f) << 10);
385 tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
386
387 /*
388 * be sure to set Bit 10 for the V2 LCDC controller,
389 * otherwise limited to 1024 pixels width, stopping
390 * 1920x1080 being supported.
391 */
392 if (priv->rev == 2) {
393 if ((mode->vdisplay - 1) & 0x400) {
394 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
395 LCDC_LPP_B10);
396 } else {
397 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
398 LCDC_LPP_B10);
399 }
400 }
401
402 /* Configure display type: */
403 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
404 ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
405 LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
406 0x000ff000 /* Palette Loading Delay bits */);
407 reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
408 if (info->tft_alt_mode)
409 reg |= LCDC_TFT_ALT_ENABLE;
410 if (priv->rev == 2) {
Ville Syrjälä438b74a2016-12-14 23:32:55 +0200411 switch (fb->format->format) {
Jyri Sarha75d7f272016-11-24 23:25:08 +0200412 case DRM_FORMAT_BGR565:
413 case DRM_FORMAT_RGB565:
414 break;
415 case DRM_FORMAT_XBGR8888:
416 case DRM_FORMAT_XRGB8888:
417 reg |= LCDC_V2_TFT_24BPP_UNPACK;
418 /* fallthrough */
419 case DRM_FORMAT_BGR888:
420 case DRM_FORMAT_RGB888:
421 reg |= LCDC_V2_TFT_24BPP_MODE;
422 break;
423 default:
424 dev_err(dev->dev, "invalid pixel format\n");
425 return;
426 }
427 }
428 reg |= info->fdd < 12;
429 tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
430
431 if (info->invert_pxl_clk)
432 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
433 else
434 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
435
436 if (info->sync_ctrl)
437 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
438 else
439 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
440
441 if (info->sync_edge)
442 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
443 else
444 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
445
446 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
447 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
448 else
449 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
450
451 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
452 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
453 else
454 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
455
456 if (info->raster_order)
457 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
458 else
459 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
460
461 tilcdc_crtc_set_clk(crtc);
462
463 tilcdc_crtc_load_palette(crtc);
464
465 set_scanout(crtc, fb);
466
Cihangir Akturk61dd13b2017-08-03 14:58:38 +0300467 drm_framebuffer_get(fb);
Jyri Sarha75d7f272016-11-24 23:25:08 +0200468
469 crtc->hwmode = crtc->state->adjusted_mode;
Jyri Sarhace99f722017-10-12 12:19:46 +0300470
471 tilcdc_crtc->hvtotal_us =
472 tilcdc_mode_hvtotal(&crtc->hwmode);
Jyri Sarha75d7f272016-11-24 23:25:08 +0200473}
474
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300475static void tilcdc_crtc_enable(struct drm_crtc *crtc)
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300476{
477 struct drm_device *dev = crtc->dev;
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300478 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
Jyri Sarha11abbc92017-03-01 10:30:28 +0200479 unsigned long flags;
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300480
Jyri Sarha2d53a182016-10-25 12:27:31 +0300481 mutex_lock(&tilcdc_crtc->enable_lock);
482 if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
483 mutex_unlock(&tilcdc_crtc->enable_lock);
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300484 return;
Jyri Sarha2d53a182016-10-25 12:27:31 +0300485 }
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300486
487 pm_runtime_get_sync(dev->dev);
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300488
489 reset(crtc);
Rob Clark16ea9752013-01-08 15:04:28 -0600490
Jyri Sarha75d7f272016-11-24 23:25:08 +0200491 tilcdc_crtc_set_mode(crtc);
492
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300493 tilcdc_crtc_enable_irqs(dev);
494
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +0300495 tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
Jyri Sarhaf13e0882016-11-19 18:00:32 +0200496 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
497 LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
498 LCDC_PALETTE_LOAD_MODE_MASK);
Jyri Sarha11abbc92017-03-01 10:30:28 +0200499
500 /* There is no real chance for a race here as the time stamp
501 * is taken before the raster DMA is started. The spin-lock is
502 * taken to have a memory barrier after taking the time-stamp
503 * and to avoid a context switch between taking the stamp and
504 * enabling the raster.
505 */
506 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
507 tilcdc_crtc->last_vblank = ktime_get();
Rob Clark16ea9752013-01-08 15:04:28 -0600508 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
Jyri Sarha11abbc92017-03-01 10:30:28 +0200509 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
Jyri Sarhad85f850e2016-06-15 11:16:23 +0300510
511 drm_crtc_vblank_on(crtc);
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300512
513 tilcdc_crtc->enabled = true;
Jyri Sarha2d53a182016-10-25 12:27:31 +0300514 mutex_unlock(&tilcdc_crtc->enable_lock);
Rob Clark16ea9752013-01-08 15:04:28 -0600515}
516
/* Atomic-helper hook: delegates to the internal enable path. */
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	tilcdc_crtc_enable(crtc);
}
522
Jyri Sarha2d53a182016-10-25 12:27:31 +0300523static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
Rob Clark16ea9752013-01-08 15:04:28 -0600524{
Jyri Sarha2d5be882016-04-07 20:20:23 +0300525 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
Rob Clark16ea9752013-01-08 15:04:28 -0600526 struct drm_device *dev = crtc->dev;
Jyri Sarha2d5be882016-04-07 20:20:23 +0300527 struct tilcdc_drm_private *priv = dev->dev_private;
Jyri Sarha75d7f272016-11-24 23:25:08 +0200528 int ret;
Rob Clark16ea9752013-01-08 15:04:28 -0600529
Jyri Sarha2d53a182016-10-25 12:27:31 +0300530 mutex_lock(&tilcdc_crtc->enable_lock);
531 if (shutdown)
532 tilcdc_crtc->shutdown = true;
533 if (!tilcdc_crtc->enabled) {
534 mutex_unlock(&tilcdc_crtc->enable_lock);
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300535 return;
Jyri Sarha2d53a182016-10-25 12:27:31 +0300536 }
Jyri Sarha2d5be882016-04-07 20:20:23 +0300537 tilcdc_crtc->frame_done = false;
Rob Clark16ea9752013-01-08 15:04:28 -0600538 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
Jyri Sarha2d5be882016-04-07 20:20:23 +0300539
540 /*
Jyri Sarha75d7f272016-11-24 23:25:08 +0200541 * Wait for framedone irq which will still come before putting
542 * things to sleep..
Jyri Sarha2d5be882016-04-07 20:20:23 +0300543 */
Jyri Sarha75d7f272016-11-24 23:25:08 +0200544 ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
545 tilcdc_crtc->frame_done,
546 msecs_to_jiffies(500));
547 if (ret == 0)
548 dev_err(dev->dev, "%s: timeout waiting for framedone\n",
549 __func__);
Jyri Sarhad85f850e2016-06-15 11:16:23 +0300550
551 drm_crtc_vblank_off(crtc);
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300552
553 tilcdc_crtc_disable_irqs(dev);
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300554
555 pm_runtime_put_sync(dev->dev);
556
557 if (tilcdc_crtc->next_fb) {
558 drm_flip_work_queue(&tilcdc_crtc->unref_work,
559 tilcdc_crtc->next_fb);
560 tilcdc_crtc->next_fb = NULL;
561 }
562
563 if (tilcdc_crtc->curr_fb) {
564 drm_flip_work_queue(&tilcdc_crtc->unref_work,
565 tilcdc_crtc->curr_fb);
566 tilcdc_crtc->curr_fb = NULL;
567 }
568
569 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300570
571 tilcdc_crtc->enabled = false;
Jyri Sarha2d53a182016-10-25 12:27:31 +0300572 mutex_unlock(&tilcdc_crtc->enable_lock);
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300573}
574
Jyri Sarha9e79e062016-10-18 23:23:27 +0300575static void tilcdc_crtc_disable(struct drm_crtc *crtc)
576{
Jyri Sarha2d53a182016-10-25 12:27:31 +0300577 tilcdc_crtc_off(crtc, false);
578}
579
/* Atomic-helper hook: delegates to the internal disable path. */
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	tilcdc_crtc_disable(crtc);
}
585
Jyri Sarha2d53a182016-10-25 12:27:31 +0300586void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
587{
588 tilcdc_crtc_off(crtc, true);
Jyri Sarha9e79e062016-10-18 23:23:27 +0300589}
590
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300591static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
592{
593 return crtc->state && crtc->state->enable && crtc->state->active;
Rob Clark16ea9752013-01-08 15:04:28 -0600594}
595
Jyri Sarha13b3d722016-04-06 14:02:38 +0300596static void tilcdc_crtc_recover_work(struct work_struct *work)
597{
598 struct tilcdc_crtc *tilcdc_crtc =
599 container_of(work, struct tilcdc_crtc, recover_work);
600 struct drm_crtc *crtc = &tilcdc_crtc->base;
601
602 dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);
603
Daniel Vetter33e5b662017-03-22 22:50:47 +0100604 drm_modeset_lock(&crtc->mutex, NULL);
Jyri Sarha13b3d722016-04-06 14:02:38 +0300605
606 if (!tilcdc_crtc_is_on(crtc))
607 goto out;
608
609 tilcdc_crtc_disable(crtc);
610 tilcdc_crtc_enable(crtc);
611out:
Daniel Vetter33e5b662017-03-22 22:50:47 +0100612 drm_modeset_unlock(&crtc->mutex);
Jyri Sarha13b3d722016-04-06 14:02:38 +0300613}
614
Rob Clark16ea9752013-01-08 15:04:28 -0600615static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
616{
617 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
Jyri Sarha4e910c72016-09-06 22:55:33 +0300618 struct tilcdc_drm_private *priv = crtc->dev->dev_private;
Rob Clark16ea9752013-01-08 15:04:28 -0600619
Jyri Sarhaba3fd952017-05-29 22:09:44 +0300620 tilcdc_crtc_shutdown(crtc);
Rob Clark16ea9752013-01-08 15:04:28 -0600621
Jyri Sarha4e910c72016-09-06 22:55:33 +0300622 flush_workqueue(priv->wq);
Rob Clark16ea9752013-01-08 15:04:28 -0600623
Jyri Sarhad66284fb2015-05-27 11:58:37 +0300624 of_node_put(crtc->port);
Rob Clark16ea9752013-01-08 15:04:28 -0600625 drm_crtc_cleanup(crtc);
Rob Clarka464d612013-08-07 13:41:20 -0400626 drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
Rob Clark16ea9752013-01-08 15:04:28 -0600627}
628
Jyri Sarhae0e344e2016-06-22 17:21:06 +0300629int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
Rob Clark16ea9752013-01-08 15:04:28 -0600630 struct drm_framebuffer *fb,
Jyri Sarhae0e344e2016-06-22 17:21:06 +0300631 struct drm_pending_vblank_event *event)
Rob Clark16ea9752013-01-08 15:04:28 -0600632{
633 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
634 struct drm_device *dev = crtc->dev;
Tomi Valkeinen6f206e92014-02-07 17:37:07 +0000635
Rob Clark16ea9752013-01-08 15:04:28 -0600636 if (tilcdc_crtc->event) {
637 dev_err(dev->dev, "already pending page flip!\n");
638 return -EBUSY;
639 }
640
Cihangir Akturk61dd13b2017-08-03 14:58:38 +0300641 drm_framebuffer_get(fb);
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +0300642
Matt Roperf4510a22014-04-01 15:22:40 -0700643 crtc->primary->fb = fb;
Jyri Sarha11abbc92017-03-01 10:30:28 +0200644 tilcdc_crtc->event = event;
Tomi Valkeinen65734a22015-10-19 12:30:03 +0300645
Jyri Sarha11abbc92017-03-01 10:30:28 +0200646 mutex_lock(&tilcdc_crtc->enable_lock);
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +0300647
Jyri Sarha11abbc92017-03-01 10:30:28 +0200648 if (tilcdc_crtc->enabled) {
649 unsigned long flags;
Jyri Sarha0a1fe1b2016-06-13 09:53:36 +0300650 ktime_t next_vblank;
651 s64 tdiff;
Tomi Valkeinen2b2080d72015-10-20 09:37:27 +0300652
Jyri Sarha11abbc92017-03-01 10:30:28 +0200653 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +0200654
Jyri Sarha11abbc92017-03-01 10:30:28 +0200655 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
Jyri Sarhace99f722017-10-12 12:19:46 +0300656 tilcdc_crtc->hvtotal_us);
Jyri Sarha0a1fe1b2016-06-13 09:53:36 +0300657 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
658
659 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
660 tilcdc_crtc->next_fb = fb;
Jyri Sarha11abbc92017-03-01 10:30:28 +0200661 else
662 set_scanout(crtc, fb);
663
664 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
Jyri Sarha0a1fe1b2016-06-13 09:53:36 +0300665 }
666
Jyri Sarha11abbc92017-03-01 10:30:28 +0200667 mutex_unlock(&tilcdc_crtc->enable_lock);
Rob Clark16ea9752013-01-08 15:04:28 -0600668
669 return 0;
670}
671
Rob Clark16ea9752013-01-08 15:04:28 -0600672static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
673 const struct drm_display_mode *mode,
674 struct drm_display_mode *adjusted_mode)
675{
Jyri Sarha103cd8b2015-02-10 14:13:23 +0200676 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
677
678 if (!tilcdc_crtc->simulate_vesa_sync)
679 return true;
680
681 /*
682 * tilcdc does not generate VESA-compliant sync but aligns
683 * VS on the second edge of HS instead of first edge.
684 * We use adjusted_mode, to fixup sync by aligning both rising
685 * edges and add HSKEW offset to fix the sync.
686 */
687 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
688 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
689
690 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
691 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
692 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
693 } else {
694 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
695 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
696 }
697
Rob Clark16ea9752013-01-08 15:04:28 -0600698 return true;
699}
700
Jyri Sarhadb380c52016-04-07 15:10:23 +0300701static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
702 struct drm_crtc_state *state)
703{
704 struct drm_display_mode *mode = &state->mode;
705 int ret;
706
707 /* If we are not active we don't care */
708 if (!state->active)
709 return 0;
710
711 if (state->state->planes[0].ptr != crtc->primary ||
712 state->state->planes[0].state == NULL ||
713 state->state->planes[0].state->crtc != crtc) {
714 dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
715 return -EINVAL;
716 }
717
718 ret = tilcdc_crtc_mode_valid(crtc, mode);
719 if (ret) {
720 dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
721 return -EINVAL;
722 }
723
724 return 0;
725}
726
/* Vblank irqs are always on while the CRTC runs; nothing to do here. */
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	return 0;
}

/* See tilcdc_crtc_enable_vblank(): intentionally a no-op. */
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}
735
Jyri Sarha46a956a2017-05-26 13:20:17 +0300736static void tilcdc_crtc_reset(struct drm_crtc *crtc)
737{
738 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
739 struct drm_device *dev = crtc->dev;
740 int ret;
741
742 drm_atomic_helper_crtc_reset(crtc);
743
744 /* Turn the raster off if it for some reason is on. */
745 pm_runtime_get_sync(dev->dev);
746 if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
747 /* Enable DMA Frame Done Interrupt */
748 tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
749 tilcdc_clear_irqstatus(dev, 0xffffffff);
750
751 tilcdc_crtc->frame_done = false;
752 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
753
754 ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
755 tilcdc_crtc->frame_done,
756 msecs_to_jiffies(500));
757 if (ret == 0)
758 dev_err(dev->dev, "%s: timeout waiting for framedone\n",
759 __func__);
760 }
761 pm_runtime_put_sync(dev->dev);
762}
763
/*
 * CRTC entry points.  Atomic state handling is delegated to the generic
 * atomic helpers; .reset is overridden so the raster can be forced off
 * before the common state reset (see tilcdc_crtc_reset()).
 */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy = tilcdc_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = tilcdc_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = tilcdc_crtc_enable_vblank,
	.disable_vblank = tilcdc_crtc_disable_vblank,
};
774
/* Helper hooks used by the atomic modeset helpers for this CRTC. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup = tilcdc_crtc_mode_fixup,
	.atomic_check = tilcdc_crtc_atomic_check,
	.atomic_enable = tilcdc_crtc_atomic_enable,
	.atomic_disable = tilcdc_crtc_atomic_disable,
};
781
782int tilcdc_crtc_max_width(struct drm_crtc *crtc)
783{
784 struct drm_device *dev = crtc->dev;
785 struct tilcdc_drm_private *priv = dev->dev_private;
786 int max_width = 0;
787
788 if (priv->rev == 1)
789 max_width = 1024;
790 else if (priv->rev == 2)
791 max_width = 2048;
792
793 return max_width;
794}
795
796int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
797{
798 struct tilcdc_drm_private *priv = crtc->dev->dev_private;
799 unsigned int bandwidth;
Darren Etheridgee1c5d0a2013-06-21 13:52:25 -0500800 uint32_t hbp, hfp, hsw, vbp, vfp, vsw;
Rob Clark16ea9752013-01-08 15:04:28 -0600801
Darren Etheridgee1c5d0a2013-06-21 13:52:25 -0500802 /*
803 * check to see if the width is within the range that
804 * the LCD Controller physically supports
805 */
Rob Clark16ea9752013-01-08 15:04:28 -0600806 if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
807 return MODE_VIRTUAL_X;
808
809 /* width must be multiple of 16 */
810 if (mode->hdisplay & 0xf)
811 return MODE_VIRTUAL_X;
812
813 if (mode->vdisplay > 2048)
814 return MODE_VIRTUAL_Y;
815
Darren Etheridgee1c5d0a2013-06-21 13:52:25 -0500816 DBG("Processing mode %dx%d@%d with pixel clock %d",
817 mode->hdisplay, mode->vdisplay,
818 drm_mode_vrefresh(mode), mode->clock);
819
820 hbp = mode->htotal - mode->hsync_end;
821 hfp = mode->hsync_start - mode->hdisplay;
822 hsw = mode->hsync_end - mode->hsync_start;
823 vbp = mode->vtotal - mode->vsync_end;
824 vfp = mode->vsync_start - mode->vdisplay;
825 vsw = mode->vsync_end - mode->vsync_start;
826
827 if ((hbp-1) & ~0x3ff) {
828 DBG("Pruning mode: Horizontal Back Porch out of range");
829 return MODE_HBLANK_WIDE;
830 }
831
832 if ((hfp-1) & ~0x3ff) {
833 DBG("Pruning mode: Horizontal Front Porch out of range");
834 return MODE_HBLANK_WIDE;
835 }
836
837 if ((hsw-1) & ~0x3ff) {
838 DBG("Pruning mode: Horizontal Sync Width out of range");
839 return MODE_HSYNC_WIDE;
840 }
841
842 if (vbp & ~0xff) {
843 DBG("Pruning mode: Vertical Back Porch out of range");
844 return MODE_VBLANK_WIDE;
845 }
846
847 if (vfp & ~0xff) {
848 DBG("Pruning mode: Vertical Front Porch out of range");
849 return MODE_VBLANK_WIDE;
850 }
851
852 if ((vsw-1) & ~0x3f) {
853 DBG("Pruning mode: Vertical Sync Width out of range");
854 return MODE_VSYNC_WIDE;
855 }
856
Darren Etheridge4e564342013-06-21 13:52:23 -0500857 /*
858 * some devices have a maximum allowed pixel clock
859 * configured from the DT
860 */
861 if (mode->clock > priv->max_pixelclock) {
Darren Etheridgef7b45752013-06-21 13:52:26 -0500862 DBG("Pruning mode: pixel clock too high");
Darren Etheridge4e564342013-06-21 13:52:23 -0500863 return MODE_CLOCK_HIGH;
864 }
865
866 /*
867 * some devices further limit the max horizontal resolution
868 * configured from the DT
869 */
870 if (mode->hdisplay > priv->max_width)
871 return MODE_BAD_WIDTH;
872
Rob Clark16ea9752013-01-08 15:04:28 -0600873 /* filter out modes that would require too much memory bandwidth: */
Darren Etheridge4e564342013-06-21 13:52:23 -0500874 bandwidth = mode->hdisplay * mode->vdisplay *
875 drm_mode_vrefresh(mode);
876 if (bandwidth > priv->max_bandwidth) {
Darren Etheridgef7b45752013-06-21 13:52:26 -0500877 DBG("Pruning mode: exceeds defined bandwidth limit");
Rob Clark16ea9752013-01-08 15:04:28 -0600878 return MODE_BAD;
Darren Etheridge4e564342013-06-21 13:52:23 -0500879 }
Rob Clark16ea9752013-01-08 15:04:28 -0600880
881 return MODE_OK;
882}
883
884void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
885 const struct tilcdc_panel_info *info)
886{
887 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
888 tilcdc_crtc->info = info;
889}
890
Jyri Sarha103cd8b2015-02-10 14:13:23 +0200891void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
892 bool simulate_vesa_sync)
893{
894 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
895
896 tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
897}
898
/*
 * React to a change of the functional clock rate.
 *
 * If the cached LCD functional clock rate no longer matches the actual
 * clock rate, and the CRTC is currently running, restart the raster with
 * the clock reprogrammed (disable -> set_clk -> enable).  The whole
 * sequence runs under the CRTC modeset lock to serialize against
 * concurrent modesets.
 */
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	drm_modeset_lock(&crtc->mutex, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			/* Keep the device powered across the restart. */
			pm_runtime_get_sync(dev->dev);
			tilcdc_crtc_disable(crtc);

			tilcdc_crtc_set_clk(crtc);

			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock(&crtc->mutex);
}
919
/*
 * Number of sync-lost interrupts tolerated (without an intact frame in
 * between) before the IRQ handler declares a "sync lost flood" and queues
 * the recovery work.
 */
#define SYNC_LOST_COUNT_LIMIT 50
921
/*
 * LCDC interrupt handler.
 *
 * Handles, in order:
 *  - END_OF_FRAME0: commits deferred framebuffer unrefs, latches a pending
 *    page flip into the scanout registers, reports vblank and delivers the
 *    pending vblank event (unless a flip was just latched, in which case
 *    the event is sent when that flip completes), and maintains the
 *    sync-lost flood counter.
 *  - FIFO_UNDERFLOW: rate-limited error log only.
 *  - PL_LOAD_DONE: completes palette loading and masks the (one-shot)
 *    palette interrupt, with rev 1/rev 2 register differences.
 *  - SYNC_LOST: rev 1 recovers by toggling the raster; rev 2 counts
 *    occurrences and queues recover_work once SYNC_LOST_COUNT_LIMIT is
 *    exceeded, masking the interrupt meanwhile.
 *  - FRAME_DONE: wakes waiters of frame_done_wq (see tilcdc_crtc_reset()).
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat, reg;

	/* Read and acknowledge all pending interrupt bits at once. */
	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		if (tilcdc_crtc->next_fb) {
			/*
			 * A flip is pending: program it now and defer the
			 * event to the flip's own completion.
			 */
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			/* event_lock protects tilcdc_crtc->event. */
			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		/* A clean frame resets the sync-lost flood counter. */
		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	if (stat & LCDC_PL_LOAD_DONE) {
		complete(&tilcdc_crtc->palette_loaded);
		/* Palette load is one-shot; mask its interrupt again. */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}

	if (stat & LCDC_SYNC_LOST) {
		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			/* Rev 1: recover by toggling the raster enable. */
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				/* Mask until recover_work has run. */
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}

	if (stat & LCDC_FRAME_DONE) {
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}
1034
/*
 * Allocate and register the single tilcdc CRTC.
 *
 * Allocates the CRTC state (devm-managed) and the DMA-coherent palette
 * buffer, initializes the primary plane, locks/waitqueues/workers, then
 * registers the CRTC with DRM and looks up the OF graph port when the
 * device is componentized.  On success priv->crtc is set and 0 returned;
 * on failure a negative errno is returned.
 *
 * NOTE(review): the fail: path calls tilcdc_crtc_destroy() even when
 * drm_crtc_init_with_planes() has not run (e.g. tilcdc_plane_init()
 * failure) — verify tilcdc_crtc_destroy() is safe on an uninitialized
 * CRTC before relying on those error paths.
 */
int tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc;
	struct drm_crtc *crtc;
	int ret;

	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
	if (!tilcdc_crtc) {
		dev_err(dev->dev, "allocation failed\n");
		return -ENOMEM;
	}

	/* Palette DMA buffer; first entry carries the palette header word. */
	init_completion(&tilcdc_crtc->palette_loaded);
	tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
					TILCDC_PALETTE_SIZE,
					&tilcdc_crtc->palette_dma_handle,
					GFP_KERNEL | __GFP_ZERO);
	if (!tilcdc_crtc->palette_base)
		return -ENOMEM;
	*tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;

	crtc = &tilcdc_crtc->base;

	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
	if (ret < 0)
		goto fail;

	mutex_init(&tilcdc_crtc->enable_lock);

	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

	/* Framebuffer unrefs are deferred to the driver workqueue. */
	drm_flip_work_init(&tilcdc_crtc->unref_work,
			"unref", unref_worker);

	spin_lock_init(&tilcdc_crtc->irq_lock);
	INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);

	ret = drm_crtc_init_with_planes(dev, crtc,
					&tilcdc_crtc->primary,
					NULL,
					&tilcdc_crtc_funcs,
					"tilcdc crtc");
	if (ret < 0)
		goto fail;

	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

	if (priv->is_componentized) {
		/* The OF graph port binds this CRTC to its encoder. */
		crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
		if (!crtc->port) { /* This should never happen */
			dev_err(dev->dev, "Port node not found in %pOF\n",
				dev->dev->of_node);
			ret = -EINVAL;
			goto fail;
		}
	}

	priv->crtc = crtc;
	return 0;

fail:
	tilcdc_crtc_destroy(crtc);
	return ret;
}
Rob Clark16ea9752013-01-08 15:04:28 -06001099}