/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
15#include <linux/module.h>
16#include <linux/export.h>
17#include <linux/types.h>
Philipp Zabel6c641552013-03-28 17:35:21 +010018#include <linux/reset.h>
Sascha Haueraecfbdb2012-09-21 10:07:49 +020019#include <linux/platform_device.h>
20#include <linux/err.h>
21#include <linux/spinlock.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/clk.h>
26#include <linux/list.h>
27#include <linux/irq.h>
Catalin Marinasde88cbb2013-01-18 15:31:37 +000028#include <linux/irqchip/chained_irq.h>
Philipp Zabelb7287662013-06-21 10:27:39 +020029#include <linux/irqdomain.h>
Sascha Haueraecfbdb2012-09-21 10:07:49 +020030#include <linux/of_device.h>
Philipp Zabel304e6be2015-11-09 16:35:12 +010031#include <linux/of_graph.h>
Sascha Haueraecfbdb2012-09-21 10:07:49 +020032
Philipp Zabel7cb17792013-10-10 16:18:38 +020033#include <drm/drm_fourcc.h>
34
Philipp Zabel39b90042013-09-30 16:13:39 +020035#include <video/imx-ipu-v3.h>
Sascha Haueraecfbdb2012-09-21 10:07:49 +020036#include "ipu-prv.h"
37
/* Read a 32-bit register of the IPU Common Module at @offset. */
static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
	return readl(ipu->cm_reg + offset);
}
42
/* Write @value to a 32-bit register of the IPU Common Module at @offset. */
static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
	writel(value, ipu->cm_reg + offset);
}
47
/* Return the index (id) of this IPU instance on SoCs with multiple IPUs. */
int ipu_get_num(struct ipu_soc *ipu)
{
	return ipu->id;
}
EXPORT_SYMBOL_GPL(ipu_get_num);
53
/*
 * Trigger an update of the DP shadow registers via the Shadow Register
 * Module by setting bit 3 of IPU_SRM_PRI2.
 * NOTE(review): exact SRM bit semantics inferred from this usage only -
 * confirm against the i.MX reference manual.
 */
void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
	u32 val;

	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
	val |= 0x8;
	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
63
Philipp Zabel7cb17792013-10-10 16:18:38 +020064enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
65{
66 switch (drm_fourcc) {
Philipp Zabel0cb8b752014-12-12 13:40:14 +010067 case DRM_FORMAT_ARGB1555:
68 case DRM_FORMAT_ABGR1555:
69 case DRM_FORMAT_RGBA5551:
70 case DRM_FORMAT_BGRA5551:
Philipp Zabel7cb17792013-10-10 16:18:38 +020071 case DRM_FORMAT_RGB565:
72 case DRM_FORMAT_BGR565:
73 case DRM_FORMAT_RGB888:
74 case DRM_FORMAT_BGR888:
Lucas Stach7d2e8a22015-08-04 17:21:04 +020075 case DRM_FORMAT_ARGB4444:
Philipp Zabel7cb17792013-10-10 16:18:38 +020076 case DRM_FORMAT_XRGB8888:
77 case DRM_FORMAT_XBGR8888:
78 case DRM_FORMAT_RGBX8888:
79 case DRM_FORMAT_BGRX8888:
80 case DRM_FORMAT_ARGB8888:
81 case DRM_FORMAT_ABGR8888:
82 case DRM_FORMAT_RGBA8888:
83 case DRM_FORMAT_BGRA8888:
84 return IPUV3_COLORSPACE_RGB;
85 case DRM_FORMAT_YUYV:
86 case DRM_FORMAT_UYVY:
87 case DRM_FORMAT_YUV420:
88 case DRM_FORMAT_YVU420:
Steve Longerbeam9a34cef2014-06-25 18:05:53 -070089 case DRM_FORMAT_YUV422:
90 case DRM_FORMAT_YVU422:
Philipp Zabelc9d508c2016-10-18 13:36:33 +020091 case DRM_FORMAT_YUV444:
92 case DRM_FORMAT_YVU444:
Steve Longerbeam9a34cef2014-06-25 18:05:53 -070093 case DRM_FORMAT_NV12:
94 case DRM_FORMAT_NV21:
95 case DRM_FORMAT_NV16:
96 case DRM_FORMAT_NV61:
Philipp Zabel7cb17792013-10-10 16:18:38 +020097 return IPUV3_COLORSPACE_YUV;
98 default:
99 return IPUV3_COLORSPACE_UNKNOWN;
100 }
101}
102EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
103
Sascha Haueraecfbdb2012-09-21 10:07:49 +0200104enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
105{
106 switch (pixelformat) {
107 case V4L2_PIX_FMT_YUV420:
Philipp Zabeld3e4e612012-11-12 16:29:00 +0100108 case V4L2_PIX_FMT_YVU420:
Steve Longerbeam9a34cef2014-06-25 18:05:53 -0700109 case V4L2_PIX_FMT_YUV422P:
Sascha Haueraecfbdb2012-09-21 10:07:49 +0200110 case V4L2_PIX_FMT_UYVY:
Michael Olbrichc096ae12012-11-12 16:28:59 +0100111 case V4L2_PIX_FMT_YUYV:
Steve Longerbeam9a34cef2014-06-25 18:05:53 -0700112 case V4L2_PIX_FMT_NV12:
113 case V4L2_PIX_FMT_NV21:
114 case V4L2_PIX_FMT_NV16:
115 case V4L2_PIX_FMT_NV61:
Sascha Haueraecfbdb2012-09-21 10:07:49 +0200116 return IPUV3_COLORSPACE_YUV;
117 case V4L2_PIX_FMT_RGB32:
118 case V4L2_PIX_FMT_BGR32:
119 case V4L2_PIX_FMT_RGB24:
120 case V4L2_PIX_FMT_BGR24:
121 case V4L2_PIX_FMT_RGB565:
122 return IPUV3_COLORSPACE_RGB;
123 default:
124 return IPUV3_COLORSPACE_UNKNOWN;
125 }
126}
127EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
128
Steve Longerbeam4cea9402014-06-25 18:05:38 -0700129bool ipu_pixelformat_is_planar(u32 pixelformat)
130{
131 switch (pixelformat) {
132 case V4L2_PIX_FMT_YUV420:
133 case V4L2_PIX_FMT_YVU420:
Steve Longerbeam9a34cef2014-06-25 18:05:53 -0700134 case V4L2_PIX_FMT_YUV422P:
135 case V4L2_PIX_FMT_NV12:
136 case V4L2_PIX_FMT_NV21:
137 case V4L2_PIX_FMT_NV16:
138 case V4L2_PIX_FMT_NV61:
Steve Longerbeam4cea9402014-06-25 18:05:38 -0700139 return true;
140 }
141
142 return false;
143}
144EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
145
Steve Longerbeamae0e9702014-06-25 18:05:36 -0700146enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
147{
148 switch (mbus_code & 0xf000) {
149 case 0x1000:
150 return IPUV3_COLORSPACE_RGB;
151 case 0x2000:
152 return IPUV3_COLORSPACE_YUV;
153 default:
154 return IPUV3_COLORSPACE_UNKNOWN;
155 }
156}
157EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
158
Steve Longerbeam6930afd2014-06-25 18:05:43 -0700159int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
160{
161 switch (pixelformat) {
162 case V4L2_PIX_FMT_YUV420:
163 case V4L2_PIX_FMT_YVU420:
Steve Longerbeam9a34cef2014-06-25 18:05:53 -0700164 case V4L2_PIX_FMT_YUV422P:
165 case V4L2_PIX_FMT_NV12:
166 case V4L2_PIX_FMT_NV21:
167 case V4L2_PIX_FMT_NV16:
168 case V4L2_PIX_FMT_NV61:
Steve Longerbeam6930afd2014-06-25 18:05:43 -0700169 /*
170 * for the planar YUV formats, the stride passed to
171 * cpmem must be the stride in bytes of the Y plane.
172 * And all the planar YUV formats have an 8-bit
173 * Y component.
174 */
175 return (8 * pixel_stride) >> 3;
176 case V4L2_PIX_FMT_RGB565:
177 case V4L2_PIX_FMT_YUYV:
178 case V4L2_PIX_FMT_UYVY:
179 return (16 * pixel_stride) >> 3;
180 case V4L2_PIX_FMT_BGR24:
181 case V4L2_PIX_FMT_RGB24:
182 return (24 * pixel_stride) >> 3;
183 case V4L2_PIX_FMT_BGR32:
184 case V4L2_PIX_FMT_RGB32:
185 return (32 * pixel_stride) >> 3;
186 default:
187 break;
188 }
189
190 return -EINVAL;
191}
192EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
193
Steve Longerbeamf835f382014-06-25 18:05:37 -0700194int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
195 bool hflip, bool vflip)
196{
197 u32 r90, vf, hf;
198
199 switch (degrees) {
200 case 0:
201 vf = hf = r90 = 0;
202 break;
203 case 90:
204 vf = hf = 0;
205 r90 = 1;
206 break;
207 case 180:
208 vf = hf = 1;
209 r90 = 0;
210 break;
211 case 270:
212 vf = hf = r90 = 1;
213 break;
214 default:
215 return -EINVAL;
216 }
217
218 hf ^= (u32)hflip;
219 vf ^= (u32)vflip;
220
221 *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
222 return 0;
223}
224EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
225
226int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
227 bool hflip, bool vflip)
228{
229 u32 r90, vf, hf;
230
231 r90 = ((u32)mode >> 2) & 0x1;
232 hf = ((u32)mode >> 1) & 0x1;
233 vf = ((u32)mode >> 0) & 0x1;
234 hf ^= (u32)hflip;
235 vf ^= (u32)vflip;
236
237 switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
238 case IPU_ROTATE_NONE:
239 *degrees = 0;
240 break;
241 case IPU_ROTATE_90_RIGHT:
242 *degrees = 90;
243 break;
244 case IPU_ROTATE_180:
245 *degrees = 180;
246 break;
247 case IPU_ROTATE_90_LEFT:
248 *degrees = 270;
249 break;
250 default:
251 return -EINVAL;
252 }
253
254 return 0;
255}
256EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
257
Sascha Haueraecfbdb2012-09-21 10:07:49 +0200258struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
259{
260 struct ipuv3_channel *channel;
261
262 dev_dbg(ipu->dev, "%s %d\n", __func__, num);
263
264 if (num > 63)
265 return ERR_PTR(-ENODEV);
266
267 mutex_lock(&ipu->channel_lock);
268
269 channel = &ipu->channel[num];
270
271 if (channel->busy) {
272 channel = ERR_PTR(-EBUSY);
273 goto out;
274 }
275
Valentina Manea89bc5be2013-10-25 11:52:20 +0300276 channel->busy = true;
Sascha Haueraecfbdb2012-09-21 10:07:49 +0200277 channel->num = num;
278
279out:
280 mutex_unlock(&ipu->channel_lock);
281
282 return channel;
283}
284EXPORT_SYMBOL_GPL(ipu_idmac_get);
285
/*
 * Release an IDMAC channel previously claimed with ipu_idmac_get(),
 * making it available to other users again.
 */
void ipu_idmac_put(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;

	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

	mutex_lock(&ipu->channel_lock);

	channel->busy = false;

	mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);
299
/* Bit for channel @ch within its 32-bit bank of a per-channel register. */
#define idma_mask(ch)	(1 << ((ch) & 0x1f))

/*
 * This is an undocumented feature, a write one to a channel bit in
 * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF will reset the channel's
 * internal current buffer pointer so that transfers start from buffer
 * 0 on the next channel enable (that's the theory anyway, the imx6 TRM
 * only says these are read-only registers). This operation is required
 * for channel linking to work correctly, for instance video capture
 * pipelines that carry out image rotations will fail after the first
 * streaming unless this function is called for each channel before
 * re-enabling the channels.
 */
static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
}
320
/*
 * Enable or disable double buffering for an IDMAC channel, then reset
 * the channel's current-buffer pointer so that the next enable starts
 * transfers from buffer 0.
 */
void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
		bool doublebuffer)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&ipu->lock, flags);

	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	if (doublebuffer)
		reg |= idma_mask(channel->num);
	else
		reg &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
342
/*
 * Channels that support burst locking. Each one has a 2-bit enable
 * field at @shift inside @reg (IDMAC_CH_LOCK_EN_1 or _2).
 */
static const struct {
	int chnum;	/* IDMAC channel number */
	u32 reg;	/* lock-enable register holding this channel's field */
	int shift;	/* bit position of the 2-bit field within @reg */
} idmac_lock_en_info[] = {
	{ .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
	{ .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
	{ .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
	{ .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
	{ .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
	{ .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
	{ .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
	{ .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
	{ .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
	{ .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
	{ .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
	{ .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
	{ .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
	{ .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
	{ .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
	{ .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
	{ .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
};
366
/*
 * Configure burst locking for a channel: the IDMAC will lock the bus
 * for @num_bursts consecutive bursts. Only the channels listed in
 * idmac_lock_en_info[] support this.
 *
 * Return: 0 on success, -EINVAL for an unsupported burst count or a
 * channel without lock support.
 */
int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 bursts, regval;
	int i;

	/* map the burst count to the 2-bit hardware encoding */
	switch (num_bursts) {
	case 0:
	case 1:
		bursts = 0x00; /* locking disabled */
		break;
	case 2:
		bursts = 0x01;
		break;
	case 4:
		bursts = 0x02;
		break;
	case 8:
		bursts = 0x03;
		break;
	default:
		return -EINVAL;
	}

	/* find this channel's lock-enable field, if it has one */
	for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
		if (channel->num == idmac_lock_en_info[i].chnum)
			break;
	}
	if (i >= ARRAY_SIZE(idmac_lock_en_info))
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
	regval &= ~(0x03 << idmac_lock_en_info[i].shift);
	regval |= (bursts << idmac_lock_en_info[i].shift);
	ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
411
/*
 * Enable the IPU sub-modules selected by @mask in IPU_CONF. If a
 * display interface is being enabled, its counter is released first
 * via IPU_DISP_GEN so the DI starts running once enabled.
 */
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val |= IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val |= IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	val = ipu_cm_read(ipu, IPU_CONF);
	val |= mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);
437
/*
 * Disable the IPU sub-modules selected by @mask in IPU_CONF, then gate
 * the display interface counters for any DI included in @mask
 * (reverse order of ipu_module_enable()).
 */
int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val &= ~IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val &= ~IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);
463
Philipp Zabele9046092012-05-16 17:28:29 +0200464int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
465{
466 struct ipu_soc *ipu = channel->ipu;
467 unsigned int chno = channel->num;
468
469 return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
470}
471EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
472
Steve Longerbeamaa52f572014-06-25 18:05:40 -0700473bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
474{
475 struct ipu_soc *ipu = channel->ipu;
476 unsigned long flags;
477 u32 reg = 0;
478
479 spin_lock_irqsave(&ipu->lock, flags);
480 switch (buf_num) {
481 case 0:
482 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
483 break;
484 case 1:
485 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
486 break;
487 case 2:
488 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
489 break;
490 }
491 spin_unlock_irqrestore(&ipu->lock, flags);
492
493 return ((reg & idma_mask(channel->num)) != 0);
494}
495EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
496
/*
 * Mark buffer @buf_num of the channel as ready for DMA. Only buffers
 * 0 and 1 are supported here; any non-zero @buf_num selects buffer 1.
 */
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Mark buffer as ready. */
	if (buf_num == 0)
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
	else
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
514
/*
 * Clear the ready state of buffer @buf_num (0, 1 or 2) of the channel.
 * The IPU_GPR write switches the BUFx_RDY registers into
 * write-one-to-clear mode for the duration of the update and back to
 * write-one-to-set afterwards.
 */
void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
	switch (buf_num) {
	case 0:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
		break;
	case 1:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
		break;
	case 2:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
		break;
	default:
		break;
	}
	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
542
/* Enable the channel's DMA transfers by setting its bit in IDMAC_CHA_EN. */
int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val |= idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
560
Philipp Zabel17075502014-04-14 23:53:17 +0200561bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
562{
563 return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
564}
565EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
566
Sascha Hauerfb822a32013-10-10 16:18:41 +0200567int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
568{
569 struct ipu_soc *ipu = channel->ipu;
570 unsigned long timeout;
571
572 timeout = jiffies + msecs_to_jiffies(ms);
573 while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
574 idma_mask(channel->num)) {
575 if (time_after(jiffies, timeout))
576 return -ETIMEDOUT;
577 cpu_relax();
578 }
579
580 return 0;
581}
582EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
583
Philipp Zabel17075502014-04-14 23:53:17 +0200584int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
585{
586 unsigned long timeout;
587
588 timeout = jiffies + msecs_to_jiffies(ms);
589 ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
590 while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32) & BIT(irq % 32)))) {
591 if (time_after(jiffies, timeout))
592 return -ETIMEDOUT;
593 cpu_relax();
594 }
595
596 return 0;
597}
598EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
599
/*
 * Fully quiesce an IDMAC channel: stop its DMA, reset its internal
 * current-buffer pointer, clear any pending buffer-ready bits and
 * switch it back to single-buffer mode.
 */
int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable DMA channel(s) */
	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	/* Set channel buffers NOT to be ready */
	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF0_RDY(channel->num));
	}

	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF1_RDY(channel->num));
	}

	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	/* Reset the double buffer */
	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
642
/*
 * The imx6 rev. D TRM says that enabling the WM feature will increase
 * a channel's priority. Refer to Table 36-8 Calculated priority value.
 * The sub-module that is the sink or source for the channel must enable
 * watermark signal for this to take effect (SMFC_WM for instance).
 */
void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	/* set or clear the channel's bit in its IDMAC_WM_EN bank */
	val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
	if (enable)
		val |= 1 << (channel->num % 32);
	else
		val &= ~(1 << (channel->num % 32));
	ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
667
/*
 * Reset the IPU internal memories via IPU_MEM_RST and wait for the
 * hardware to clear the busy bit (bit 31).
 *
 * Return: 0 on success, -ETIME if the reset does not complete within
 * one second.
 */
static int ipu_memory_reset(struct ipu_soc *ipu)
{
	unsigned long timeout;

	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}
683
/*
 * Set the source mux for the given CSI. Selects either parallel or
 * MIPI CSI2 sources.
 */
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
	unsigned long flags;
	u32 val, mask;

	/* each CSI has its own data-source select bit in IPU_CONF */
	mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
			       IPU_CONF_CSI0_DATA_SOURCE;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (mipi_csi2)
		val |= mask;
	else
		val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
708
/*
 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
 * The CSI select bit is only meaningful when the VDI is not the input.
 */
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (vdi) {
		val |= IPU_CONF_IC_INPUT;
	} else {
		val &= ~IPU_CONF_IC_INPUT;
		if (csi_id == 1)
			val |= IPU_CONF_CSI_SEL;
		else
			val &= ~IPU_CONF_CSI_SEL;
	}
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
734

/* Frame Synchronization Unit Channel Linking */

/* Location of one endpoint's routing field in the FSU flow registers. */
struct fsu_link_reg_info {
	int chno;	/* IDMAC channel number of this endpoint */
	u32 reg;	/* IPU_FS_PROC_FLOWx register holding the field */
	u32 mask;	/* field mask within @reg */
	u32 val;	/* field value that establishes the link */
};

/* A supported source->sink link: register updates for both endpoints. */
struct fsu_link_info {
	struct fsu_link_reg_info src;
	struct fsu_link_reg_info sink;
};

/*
 * Table of FSU links this driver knows how to establish. A zero mask
 * (e.g. the CSI_DIRECT source) means that endpoint needs no register
 * update.
 */
static const struct fsu_link_info fsu_link_info[] = {
	{
		.src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
			  FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
	}, {
		.src  = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
			  FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
	}, {
		.src  = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
			  FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
	}, {
		.src  = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
	},
};
772
773static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
774{
775 int i;
776
777 for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
778 if (src == fsu_link_info[i].src.chno &&
779 sink == fsu_link_info[i].sink.chno)
780 return &fsu_link_info[i];
781 }
782
783 return NULL;
784}
785
/*
 * Links a source channel to a sink channel in the FSU.
 * An endpoint with a zero mask needs no register update (e.g. the
 * CSI direct source).
 *
 * Return: 0 on success, -EINVAL for an unsupported channel pair.
 */
int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		src_reg |= link->src.val;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		sink_reg |= link->sink.val;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_link);
819
/*
 * Unlinks source and sink channels in the FSU by clearing the routing
 * fields that ipu_fsu_link() set (the field value is left at zero).
 *
 * Return: 0 on success, -EINVAL for an unsupported channel pair.
 */
int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
851
/* Link IDMAC channels in the FSU */
int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_link(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_link);
858
/* Unlink IDMAC channels in the FSU */
int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
865
/*
 * Per-SoC description of the IPU: sub-module register offsets
 * (relative to the IPU register window base) and the IPU variant.
 */
struct ipu_devtype {
	const char *name;
	unsigned long cm_ofs;		/* Common Module registers */
	unsigned long cpmem_ofs;	/* Channel Parameter Memory */
	unsigned long srm_ofs;		/* Shadow Register Module */
	unsigned long tpm_ofs;		/* Task Parameter Memory */
	unsigned long csi0_ofs;		/* Camera Sensor Interface 0 */
	unsigned long csi1_ofs;		/* Camera Sensor Interface 1 */
	unsigned long ic_ofs;		/* Image Converter */
	unsigned long disp0_ofs;	/* Display Interface 0 */
	unsigned long disp1_ofs;	/* Display Interface 1 */
	unsigned long dc_tmpl_ofs;	/* DC template memory */
	unsigned long vdi_ofs;		/* Video De-Interlacer */
	enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
	.name = "IPUv3EX",
	.cm_ofs = 0x1e000000,
	.cpmem_ofs = 0x1f000000,
	.srm_ofs = 0x1f040000,
	.tpm_ofs = 0x1f060000,
	.csi0_ofs = 0x1f030000,
	.csi1_ofs = 0x1f038000,
	.ic_ofs = 0x1e020000,
	.disp0_ofs = 0x1e040000,
	.disp1_ofs = 0x1e048000,
	.dc_tmpl_ofs = 0x1f080000,
	.vdi_ofs = 0x1e068000,
	.type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
	.name = "IPUv3M",
	.cm_ofs = 0x06000000,
	.cpmem_ofs = 0x07000000,
	.srm_ofs = 0x07040000,
	.tpm_ofs = 0x07060000,
	.csi0_ofs = 0x07030000,
	.csi1_ofs = 0x07038000,
	.ic_ofs = 0x06020000,
	.disp0_ofs = 0x06040000,
	.disp1_ofs = 0x06048000,
	.dc_tmpl_ofs = 0x07080000,
	.vdi_ofs = 0x06068000,
	.type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
	.name = "IPUv3H",
	.cm_ofs = 0x00200000,
	.cpmem_ofs = 0x00300000,
	.srm_ofs = 0x00340000,
	.tpm_ofs = 0x00360000,
	.csi0_ofs = 0x00230000,
	.csi1_ofs = 0x00238000,
	.ic_ofs = 0x00220000,
	.disp0_ofs = 0x00240000,
	.disp1_ofs = 0x00248000,
	.dc_tmpl_ofs = 0x00380000,
	.vdi_ofs = 0x00268000,
	.type = IPUV3H,
};

/* Device-tree match table selecting the per-SoC ipu_devtype above. */
static const struct of_device_id imx_ipu_dt_ids[] = {
	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
937
/*
 * Initialize all IPU sub-modules (CPMEM, CSIs, IC, VDI, image
 * converter, DIs, DC, DMFC, DP, SMFC) at their devtype-specific
 * offsets. On any failure the already-initialized sub-modules are
 * torn down in reverse order via the goto chain below.
 *
 * Return: 0 on success, the failing sub-module's error code otherwise.
 */
static int ipu_submodules_init(struct ipu_soc *ipu,
		struct platform_device *pdev, unsigned long ipu_base,
		struct clk *ipu_clk)
{
	char *unit;
	int ret;
	struct device *dev = &pdev->dev;
	const struct ipu_devtype *devtype = ipu->devtype;

	ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
	if (ret) {
		unit = "cpmem";
		goto err_cpmem;
	}

	ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
			   IPU_CONF_CSI0_EN, ipu_clk);
	if (ret) {
		unit = "csi0";
		goto err_csi_0;
	}

	ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
			   IPU_CONF_CSI1_EN, ipu_clk);
	if (ret) {
		unit = "csi1";
		goto err_csi_1;
	}

	ret = ipu_ic_init(ipu, dev,
			  ipu_base + devtype->ic_ofs,
			  ipu_base + devtype->tpm_ofs);
	if (ret) {
		unit = "ic";
		goto err_ic;
	}

	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
			   IPU_CONF_IC_INPUT);
	if (ret) {
		unit = "vdi";
		goto err_vdi;
	}

	ret = ipu_image_convert_init(ipu, dev);
	if (ret) {
		unit = "image_convert";
		goto err_image_convert;
	}

	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
			  IPU_CONF_DI0_EN, ipu_clk);
	if (ret) {
		unit = "di0";
		goto err_di_0;
	}

	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
			  IPU_CONF_DI1_EN, ipu_clk);
	if (ret) {
		unit = "di1";
		goto err_di_1;
	}

	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
			  IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
	if (ret) {
		unit = "dc_template";
		goto err_dc;
	}

	ret = ipu_dmfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
	if (ret) {
		unit = "dmfc";
		goto err_dmfc;
	}

	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
	if (ret) {
		unit = "dp";
		goto err_dp;
	}

	ret = ipu_smfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
	if (ret) {
		unit = "smfc";
		goto err_smfc;
	}

	return 0;

	/* unwind in strict reverse order of initialization */
err_smfc:
	ipu_dp_exit(ipu);
err_dp:
	ipu_dmfc_exit(ipu);
err_dmfc:
	ipu_dc_exit(ipu);
err_dc:
	ipu_di_exit(ipu, 1);
err_di_1:
	ipu_di_exit(ipu, 0);
err_di_0:
	ipu_image_convert_exit(ipu);
err_image_convert:
	ipu_vdi_exit(ipu);
err_vdi:
	ipu_ic_exit(ipu);
err_ic:
	ipu_csi_exit(ipu, 1);
err_csi_1:
	ipu_csi_exit(ipu, 0);
err_csi_0:
	ipu_cpmem_exit(ipu);
err_cpmem:
	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
	return ret;
}
1058
1059static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
1060{
1061 unsigned long status;
Philipp Zabelb7287662013-06-21 10:27:39 +02001062 int i, bit, irq;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001063
1064 for (i = 0; i < num_regs; i++) {
1065
1066 status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
1067 status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
1068
Philipp Zabelb7287662013-06-21 10:27:39 +02001069 for_each_set_bit(bit, &status, 32) {
Antoine Schweitzer-Chaput838201a2014-04-18 23:20:06 +02001070 irq = irq_linear_revmap(ipu->domain,
1071 regs[i] * 32 + bit);
Philipp Zabelb7287662013-06-21 10:27:39 +02001072 if (irq)
1073 generic_handle_irq(irq);
1074 }
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001075 }
1076}
1077
Thomas Gleixnerbd0b9ac2015-09-14 10:42:37 +02001078static void ipu_irq_handler(struct irq_desc *desc)
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001079{
1080 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
Jiang Liu4d9efdfc2015-07-13 20:39:54 +00001081 struct irq_chip *chip = irq_desc_get_chip(desc);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001082 const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001083
1084 chained_irq_enter(chip, desc);
1085
1086 ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
1087
1088 chained_irq_exit(chip, desc);
1089}
1090
Thomas Gleixnerbd0b9ac2015-09-14 10:42:37 +02001091static void ipu_err_irq_handler(struct irq_desc *desc)
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001092{
1093 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
Jiang Liu4d9efdfc2015-07-13 20:39:54 +00001094 struct irq_chip *chip = irq_desc_get_chip(desc);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001095 const int int_reg[] = { 4, 5, 8, 9};
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001096
1097 chained_irq_enter(chip, desc);
1098
1099 ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
1100
1101 chained_irq_exit(chip, desc);
1102}
1103
Philipp Zabel861a50c2014-04-14 23:53:16 +02001104int ipu_map_irq(struct ipu_soc *ipu, int irq)
1105{
1106 int virq;
1107
1108 virq = irq_linear_revmap(ipu->domain, irq);
1109 if (!virq)
1110 virq = irq_create_mapping(ipu->domain, irq);
1111
1112 return virq;
1113}
1114EXPORT_SYMBOL_GPL(ipu_map_irq);
1115
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001116int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
1117 enum ipu_channel_irq irq_type)
1118{
Philipp Zabel861a50c2014-04-14 23:53:16 +02001119 return ipu_map_irq(ipu, irq_type + channel->num);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001120}
1121EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
1122
/*
 * Tear down all IPU submodules, in the exact reverse order of their
 * initialization in ipu_submodules_init(); this mirrors that function's
 * error-unwind sequence. Keep the order intact when modifying.
 */
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_smfc_exit(ipu);
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
	ipu_image_convert_exit(ipu);
	ipu_vdi_exit(ipu);
	ipu_ic_exit(ipu);
	ipu_csi_exit(ipu, 1);
	ipu_csi_exit(ipu, 0);
	ipu_cpmem_exit(ipu);
}
1138
/*
 * device_for_each_child() callback: unregister one child platform
 * device. Always returns 0 so iteration continues over all children.
 */
static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
1147
/* Unregister every child platform device previously added under pdev. */
static void platform_device_unregister_children(struct platform_device *pdev)
{
	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}
1152
/*
 * Pairs the platform data for one IPU client subdevice with the
 * platform device name it is registered under.
 */
struct ipu_platform_reg {
	struct ipu_client_platformdata pdata;	/* per-client configuration */
	const char *name;			/* platform device name */
};
1157
Philipp Zabel304e6be2015-11-09 16:35:12 +01001158/* These must be in the order of the corresponding device tree port nodes */
Philipp Zabel310944d2016-05-12 15:00:44 +02001159static struct ipu_platform_reg client_reg[] = {
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001160 {
1161 .pdata = {
Philipp Zabel304e6be2015-11-09 16:35:12 +01001162 .csi = 0,
1163 .dma[0] = IPUV3_CHANNEL_CSI0,
1164 .dma[1] = -EINVAL,
1165 },
Steve Longerbeam88287ec2016-07-19 18:11:11 -07001166 .name = "imx-ipuv3-csi",
Philipp Zabel304e6be2015-11-09 16:35:12 +01001167 }, {
1168 .pdata = {
1169 .csi = 1,
1170 .dma[0] = IPUV3_CHANNEL_CSI1,
1171 .dma[1] = -EINVAL,
1172 },
Steve Longerbeam88287ec2016-07-19 18:11:11 -07001173 .name = "imx-ipuv3-csi",
Philipp Zabel304e6be2015-11-09 16:35:12 +01001174 }, {
1175 .pdata = {
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001176 .di = 0,
1177 .dc = 5,
1178 .dp = IPU_DP_FLOW_SYNC_BG,
1179 .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
Philipp Zabelb8d181e2013-10-10 16:18:45 +02001180 .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001181 },
1182 .name = "imx-ipuv3-crtc",
1183 }, {
1184 .pdata = {
1185 .di = 1,
1186 .dc = 1,
1187 .dp = -EINVAL,
1188 .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
1189 .dma[1] = -EINVAL,
1190 },
1191 .name = "imx-ipuv3-crtc",
1192 },
1193};
1194
Russell King4ae078d2013-12-16 11:34:25 +00001195static DEFINE_MUTEX(ipu_client_id_mutex);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001196static int ipu_client_id;
1197
Philipp Zabeld6ca8ca2012-05-23 17:08:19 +02001198static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001199{
Russell King4ae078d2013-12-16 11:34:25 +00001200 struct device *dev = ipu->dev;
1201 unsigned i;
1202 int id, ret;
1203
1204 mutex_lock(&ipu_client_id_mutex);
1205 id = ipu_client_id;
1206 ipu_client_id += ARRAY_SIZE(client_reg);
1207 mutex_unlock(&ipu_client_id_mutex);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001208
1209 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
Philipp Zabel310944d2016-05-12 15:00:44 +02001210 struct ipu_platform_reg *reg = &client_reg[i];
Russell King4ae078d2013-12-16 11:34:25 +00001211 struct platform_device *pdev;
Philipp Zabel17e05212016-01-04 17:32:26 +01001212 struct device_node *of_node;
1213
1214 /* Associate subdevice with the corresponding port node */
1215 of_node = of_graph_get_port_by_id(dev->of_node, i);
1216 if (!of_node) {
1217 dev_info(dev,
1218 "no port@%d node in %s, not using %s%d\n",
1219 i, dev->of_node->full_name,
1220 (i / 2) ? "DI" : "CSI", i % 2);
1221 continue;
1222 }
Russell King4ae078d2013-12-16 11:34:25 +00001223
Philipp Zabel304e6be2015-11-09 16:35:12 +01001224 pdev = platform_device_alloc(reg->name, id++);
1225 if (!pdev) {
1226 ret = -ENOMEM;
1227 goto err_register;
1228 }
Russell King4ae078d2013-12-16 11:34:25 +00001229
Philipp Zabel304e6be2015-11-09 16:35:12 +01001230 pdev->dev.parent = dev;
1231
Philipp Zabel310944d2016-05-12 15:00:44 +02001232 reg->pdata.of_node = of_node;
Philipp Zabel304e6be2015-11-09 16:35:12 +01001233 ret = platform_device_add_data(pdev, &reg->pdata,
1234 sizeof(reg->pdata));
1235 if (!ret)
1236 ret = platform_device_add(pdev);
1237 if (ret) {
1238 platform_device_put(pdev);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001239 goto err_register;
Axel Line4946cd2014-08-03 10:38:18 +08001240 }
Philipp Zabel503fe872016-04-27 10:17:51 +02001241
1242 /*
1243 * Set of_node only after calling platform_device_add. Otherwise
1244 * the platform:imx-ipuv3-crtc modalias won't be used.
1245 */
1246 pdev->dev.of_node = of_node;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001247 }
1248
1249 return 0;
1250
1251err_register:
Russell King4ae078d2013-12-16 11:34:25 +00001252 platform_device_unregister_children(to_platform_device(dev));
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001253
1254 return ret;
1255}
1256
Philipp Zabelb7287662013-06-21 10:27:39 +02001257
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001258static int ipu_irq_init(struct ipu_soc *ipu)
1259{
Philipp Zabel379cdec2013-06-21 14:52:17 +02001260 struct irq_chip_generic *gc;
1261 struct irq_chip_type *ct;
Philipp Zabel37f85b262013-06-21 14:52:18 +02001262 unsigned long unused[IPU_NUM_IRQS / 32] = {
1263 0x400100d0, 0xffe000fd,
1264 0x400100d0, 0xffe000fd,
1265 0x400100d0, 0xffe000fd,
1266 0x4077ffff, 0xffe7e1fd,
1267 0x23fffffe, 0x8880fff0,
1268 0xf98fe7d0, 0xfff81fff,
1269 0x400100d0, 0xffe000fd,
1270 0x00000000,
1271 };
Philipp Zabel379cdec2013-06-21 14:52:17 +02001272 int ret, i;
1273
Philipp Zabelb7287662013-06-21 10:27:39 +02001274 ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
Philipp Zabel379cdec2013-06-21 14:52:17 +02001275 &irq_generic_chip_ops, ipu);
Philipp Zabelb7287662013-06-21 10:27:39 +02001276 if (!ipu->domain) {
1277 dev_err(ipu->dev, "failed to add irq domain\n");
1278 return -ENODEV;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001279 }
1280
Philipp Zabel379cdec2013-06-21 14:52:17 +02001281 ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
Rob Herringca0141d2015-08-29 18:01:21 -05001282 handle_level_irq, 0, 0, 0);
Philipp Zabel379cdec2013-06-21 14:52:17 +02001283 if (ret < 0) {
1284 dev_err(ipu->dev, "failed to alloc generic irq chips\n");
1285 irq_domain_remove(ipu->domain);
1286 return ret;
1287 }
1288
Russell King510e6422015-06-16 23:29:41 +01001289 for (i = 0; i < IPU_NUM_IRQS; i += 32)
1290 ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
1291
Philipp Zabel379cdec2013-06-21 14:52:17 +02001292 for (i = 0; i < IPU_NUM_IRQS; i += 32) {
1293 gc = irq_get_domain_generic_chip(ipu->domain, i);
1294 gc->reg_base = ipu->cm_reg;
Philipp Zabel37f85b262013-06-21 14:52:18 +02001295 gc->unused = unused[i / 32];
Philipp Zabel379cdec2013-06-21 14:52:17 +02001296 ct = gc->chip_types;
1297 ct->chip.irq_ack = irq_gc_ack_set_bit;
1298 ct->chip.irq_mask = irq_gc_mask_clr_bit;
1299 ct->chip.irq_unmask = irq_gc_mask_set_bit;
1300 ct->regs.ack = IPU_INT_STAT(i / 32);
1301 ct->regs.mask = IPU_INT_CTRL(i / 32);
1302 }
1303
Russell King86f5e732015-06-16 23:06:30 +01001304 irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
1305 irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
1306 ipu);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001307
1308 return 0;
1309}
1310
1311static void ipu_irq_exit(struct ipu_soc *ipu)
1312{
Philipp Zabelb7287662013-06-21 10:27:39 +02001313 int i, irq;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001314
Russell King86f5e732015-06-16 23:06:30 +01001315 irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
1316 irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001317
Philipp Zabel379cdec2013-06-21 14:52:17 +02001318 /* TODO: remove irq_domain_generic_chips */
1319
Philipp Zabelb7287662013-06-21 10:27:39 +02001320 for (i = 0; i < IPU_NUM_IRQS; i++) {
1321 irq = irq_linear_revmap(ipu->domain, i);
1322 if (irq)
1323 irq_dispose_mapping(irq);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001324 }
1325
Philipp Zabelb7287662013-06-21 10:27:39 +02001326 irq_domain_remove(ipu->domain);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001327}
1328
Steve Longerbeam3feb0492014-06-25 18:05:55 -07001329void ipu_dump(struct ipu_soc *ipu)
1330{
1331 int i;
1332
1333 dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
1334 ipu_cm_read(ipu, IPU_CONF));
1335 dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
1336 ipu_idmac_read(ipu, IDMAC_CONF));
1337 dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
1338 ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
1339 dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
1340 ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
1341 dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
1342 ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
1343 dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
1344 ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
1345 dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
1346 ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
1347 dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
1348 ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
1349 dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
1350 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
1351 dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
1352 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
1353 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
1354 ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
1355 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
1356 ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
1357 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
1358 ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
1359 dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
1360 ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
1361 for (i = 0; i < 15; i++)
1362 dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
1363 ipu_cm_read(ipu, IPU_INT_CTRL(i)));
1364}
1365EXPORT_SYMBOL_GPL(ipu_dump);
1366
Bill Pembertonc4aabf82012-11-19 13:22:11 -05001367static int ipu_probe(struct platform_device *pdev)
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001368{
Steve Longerbeam572a7612016-07-19 18:11:02 -07001369 struct device_node *np = pdev->dev.of_node;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001370 struct ipu_soc *ipu;
1371 struct resource *res;
1372 unsigned long ipu_base;
1373 int i, ret, irq_sync, irq_err;
1374 const struct ipu_devtype *devtype;
1375
LABBE Corentine92e4472016-08-24 10:17:17 +02001376 devtype = of_device_get_match_data(&pdev->dev);
1377 if (!devtype)
1378 return -EINVAL;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001379
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001380 irq_sync = platform_get_irq(pdev, 0);
1381 irq_err = platform_get_irq(pdev, 1);
1382 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1383
Fabio Estevamfd563db2012-10-24 21:36:46 -02001384 dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001385 irq_sync, irq_err);
1386
1387 if (!res || irq_sync < 0 || irq_err < 0)
1388 return -ENODEV;
1389
1390 ipu_base = res->start;
1391
1392 ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
1393 if (!ipu)
1394 return -ENODEV;
1395
1396 for (i = 0; i < 64; i++)
1397 ipu->channel[i].ipu = ipu;
1398 ipu->devtype = devtype;
1399 ipu->ipu_type = devtype->type;
Steve Longerbeam572a7612016-07-19 18:11:02 -07001400 ipu->id = of_alias_get_id(np, "ipu");
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001401
1402 spin_lock_init(&ipu->lock);
1403 mutex_init(&ipu->channel_lock);
1404
Fabio Estevamfd563db2012-10-24 21:36:46 -02001405 dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001406 ipu_base + devtype->cm_ofs);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001407 dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001408 ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001409 dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001410 ipu_base + devtype->cpmem_ofs);
Steve Longerbeam2ffd48f2014-08-19 10:52:40 -07001411 dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
1412 ipu_base + devtype->csi0_ofs);
1413 dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
1414 ipu_base + devtype->csi1_ofs);
Steve Longerbeam1aa8ea02014-08-11 13:04:50 +02001415 dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
1416 ipu_base + devtype->ic_ofs);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001417 dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001418 ipu_base + devtype->disp0_ofs);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001419 dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001420 ipu_base + devtype->disp1_ofs);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001421 dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001422 ipu_base + devtype->srm_ofs);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001423 dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001424 ipu_base + devtype->tpm_ofs);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001425 dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001426 ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001427 dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001428 ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001429 dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001430 ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
Fabio Estevamfd563db2012-10-24 21:36:46 -02001431 dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001432 ipu_base + devtype->vdi_ofs);
1433
1434 ipu->cm_reg = devm_ioremap(&pdev->dev,
1435 ipu_base + devtype->cm_ofs, PAGE_SIZE);
1436 ipu->idmac_reg = devm_ioremap(&pdev->dev,
1437 ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
1438 PAGE_SIZE);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001439
Steve Longerbeam7d2691d2014-06-25 18:05:47 -07001440 if (!ipu->cm_reg || !ipu->idmac_reg)
Fabio Estevambe798b22013-07-20 18:22:09 -03001441 return -ENOMEM;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001442
1443 ipu->clk = devm_clk_get(&pdev->dev, "bus");
1444 if (IS_ERR(ipu->clk)) {
1445 ret = PTR_ERR(ipu->clk);
1446 dev_err(&pdev->dev, "clk_get failed with %d", ret);
Fabio Estevambe798b22013-07-20 18:22:09 -03001447 return ret;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001448 }
1449
1450 platform_set_drvdata(pdev, ipu);
1451
Fabio Estevam62645a22013-07-20 18:22:10 -03001452 ret = clk_prepare_enable(ipu->clk);
1453 if (ret) {
1454 dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
1455 return ret;
1456 }
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001457
1458 ipu->dev = &pdev->dev;
1459 ipu->irq_sync = irq_sync;
1460 ipu->irq_err = irq_err;
1461
Philipp Zabel6c641552013-03-28 17:35:21 +01001462 ret = device_reset(&pdev->dev);
1463 if (ret) {
1464 dev_err(&pdev->dev, "failed to reset: %d\n", ret);
1465 goto out_failed_reset;
1466 }
1467 ret = ipu_memory_reset(ipu);
Lothar Waßmann4d27b2c2012-12-25 15:58:37 +01001468 if (ret)
1469 goto out_failed_reset;
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001470
David Jander596a65d2015-07-02 16:21:57 +02001471 ret = ipu_irq_init(ipu);
1472 if (ret)
1473 goto out_failed_irq;
1474
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001475 /* Set MCU_T to divide MCU access window into 2 */
1476 ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
1477 IPU_DISP_GEN);
1478
1479 ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
1480 if (ret)
1481 goto failed_submodules_init;
1482
Philipp Zabeld6ca8ca2012-05-23 17:08:19 +02001483 ret = ipu_add_client_devices(ipu, ipu_base);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001484 if (ret) {
1485 dev_err(&pdev->dev, "adding client devices failed with %d\n",
1486 ret);
1487 goto failed_add_clients;
1488 }
1489
Fabio Estevam9c2c438c2012-10-24 21:36:47 -02001490 dev_info(&pdev->dev, "%s probed\n", devtype->name);
1491
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001492 return 0;
1493
1494failed_add_clients:
1495 ipu_submodules_exit(ipu);
1496failed_submodules_init:
Philipp Zabel6c641552013-03-28 17:35:21 +01001497 ipu_irq_exit(ipu);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001498out_failed_irq:
David Jander596a65d2015-07-02 16:21:57 +02001499out_failed_reset:
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001500 clk_disable_unprepare(ipu->clk);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001501 return ret;
1502}
1503
Bill Pemberton8aa1be42012-11-19 13:26:38 -05001504static int ipu_remove(struct platform_device *pdev)
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001505{
1506 struct ipu_soc *ipu = platform_get_drvdata(pdev);
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001507
1508 platform_device_unregister_children(pdev);
1509 ipu_submodules_exit(ipu);
1510 ipu_irq_exit(ipu);
1511
1512 clk_disable_unprepare(ipu->clk);
1513
1514 return 0;
1515}
1516
/* Platform driver glue for the i.MX IPUv3, matched via imx_ipu_dt_ids. */
static struct platform_driver imx_ipu_driver = {
	.driver = {
		.name = "imx-ipuv3",
		.of_match_table = imx_ipu_dt_ids,
	},
	.probe = ipu_probe,
	.remove = ipu_remove,
};

module_platform_driver(imx_ipu_driver);
1527
Fabio Estevam10f22682013-07-20 18:22:11 -03001528MODULE_ALIAS("platform:imx-ipuv3");
Sascha Haueraecfbdb2012-09-21 10:07:49 +02001529MODULE_DESCRIPTION("i.MX IPU v3 driver");
1530MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
1531MODULE_LICENSE("GPL");