/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include "mixer.h"
#include "regs-mixer.h"
#include "regs-vp.h"

#include <linux/delay.h>

/* Register access subroutines */

static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.vp_regs + reg_id);
}

static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.vp_regs + reg_id);
}

static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = vp_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.vp_regs + reg_id);
}

static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = mxr_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.mxr_regs + reg_id);
}

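/*
 * Block or unblock the transfer of shadow registers at vsync. The driver
 * disables updates while programming a group of registers and re-enables
 * them afterwards so the new configuration is applied consistently.
 */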
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
	/* block update on vsync */
	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
		MXR_STATUS_SYNC_ENABLE);
	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}

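/*
 * Software-reset the video processor and poll until the hardware clears
 * VP_SRESET_PROCESSING, giving up after roughly one second (100 x 10 ms).
 */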
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
{
	int tries;

	vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
	for (tries = 100; tries; --tries) {
		/* wait until VP_SRESET_PROCESSING is cleared */
		if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
			break;
		mdelay(10);
	}
	WARN(tries == 0, "failed to reset Video Processor\n");
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev);

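/*
 * Bring the mixer to a known default state: YUV444 output, 16-beat DMA
 * bursts, layer priority layer1 > video > layer0, dark gray background,
 * opaque premultiplied graphic layers, a freshly reset video processor
 * with default scaler filters, and all interrupts enabled.
 */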
void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output to YUV444 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_YUV444);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* setting default layer priority: layer1 > video > layer0
	 * because typical usage scenario would be
	 * layer0 - framebuffer
	 * video - video overlay
	 * layer1 - OSD
	 */
	val = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* use dark gray background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

	/* setting graphical layers */

	val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* the same configuration for both layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* configuration of Video Processor Registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

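/*
 * Program pixel format and geometry (span, width/height, scaling ratios,
 * source and destination offsets) for graphic layer 'idx'.
 */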
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* setup format */
	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
	val = MXR_GRP_WH_WIDTH(geo->src.width);
	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

	/* setup offsets in source image */
	val = MXR_GRP_SXY_SX(geo->src.x_offset);
	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

	/* setup offsets in display image */
	val = MXR_GRP_DXY_DX(geo->dst.x_offset);
	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

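/*
 * Program the video processor layer: input format, source image size,
 * crop rectangle, destination window and scaling ratios. For interlaced
 * output the destination height and vertical offset are halved, since
 * each field carries only every other line.
 */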
void mxr_reg_vp_format(struct mxr_device *mdev,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height));
	/* chroma plane height has to be halved to avoid chroma distortions */
	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height / 2));

	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
	vp_write(mdev, VP_SRC_H_POSITION,
		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
	} else {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
	}

	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
	vp_write(mdev, VP_V_RATIO, geo->y_ratio);

	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

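/*
 * Set the DMA base address of graphic layer 'idx'. A zero address
 * disables the layer in MXR_CFG; a non-zero address enables it.
 */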
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	u32 val = addr ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	if (idx == 0)
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
	else
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

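/*
 * Set luma and chroma DMA addresses for the top and bottom fields of the
 * video processor layer; a zero luma address disables both the layer and
 * the video processor.
 */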
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
	u32 val = luma_addr[0] ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

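/*
 * Per-layer vsync handling: the previous shadow buffer is returned to
 * videobuf2 as done, the last programmed buffer becomes the new shadow
 * buffer, and the next queued buffer (if any) is programmed into the
 * hardware for the following frame.
 */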
static void mxr_irq_layer_handle(struct mxr_layer *layer)
{
	struct list_head *head;
	struct mxr_buffer *done;

	/* skip non-existing layers */
	if (layer == NULL)
		return;

	head = &layer->enq_list;

	spin_lock(&layer->enq_slock);
	if (layer->state == MXR_LAYER_IDLE)
		goto done;

	done = layer->shadow_buf;
	layer->shadow_buf = layer->update_buf;

	if (list_empty(head)) {
		if (layer->state != MXR_LAYER_STREAMING)
			layer->update_buf = NULL;
	} else {
		struct mxr_buffer *next;
		next = list_first_entry(head, struct mxr_buffer, list);
		list_del(&next->list);
		layer->update_buf = next;
	}

	layer->ops.buffer_set(layer, layer->update_buf);

	if (done && done != layer->shadow_buf)
		vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);

done:
	spin_unlock(&layer->enq_slock);
}

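/*
 * Mixer interrupt handler: wakes up any waiter on the vsync event and,
 * on a vsync interrupt, advances the buffer queues of all layers.
 */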
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		wake_up(&mdev->event_queue);
	}

	/* clear interrupts */
	if (~val & MXR_INT_EN_VSYNC) {
		/* the vsync interrupt uses different bits for read and clear */
		val &= ~MXR_INT_EN_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* leave on non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);
	return IRQ_HANDLED;
}

void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
{
	u32 val;

	val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
}

void mxr_reg_streamon(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* start MIXER */
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* stop MIXER */
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

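/*
 * Wait for the vsync event flag set by mxr_irq_handler(). Returns 0 when
 * a vsync arrives within one second, -ETIME otherwise.
 */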
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
	int ret;

	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
	/* TODO: consider adding interruptible */
	ret = wait_event_timeout(mdev->event_queue,
		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
		msecs_to_jiffies(1000));
	if (ret > 0)
		return 0;
	if (ret < 0)
		return ret;
	mxr_warn(mdev, "no vsync detected - timeout\n");
	return -ETIME;
}

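/*
 * Derive the mixer scan configuration (progressive/interlaced, SD/HD and
 * the concrete standard) from the media bus frame height, and configure
 * line skipping and automatic field toggling in the video processor for
 * interlaced output.
 */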
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* choosing between interlaced and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing the proper SD or HD mode */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK);

	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	/* no extra action is needed */
}

void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
	/* no extra action is needed */
}

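/*
 * Default scaler filter coefficients for the video processor: an 8-tap
 * filter for horizontal luma and 4-tap filters for vertical luma and
 * horizontal chroma. mxr_reg_vp_filter_set() packs them four bytes per
 * 32-bit register.
 */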
static const u8 filter_y_horiz_tap8[] = {
	0, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, 0, 0, 0,
	0, 2, 4, 5, 6, 6, 6, 6,
	6, 5, 5, 4, 3, 2, 1, 1,
	0, -6, -12, -16, -18, -20, -21, -20,
	-20, -18, -16, -13, -10, -8, -5, -2,
	127, 126, 125, 121, 114, 107, 99, 89,
	79, 68, 57, 46, 35, 25, 16, 8,
};

static const u8 filter_y_vert_tap4[] = {
	0, -3, -6, -8, -8, -8, -8, -7,
	-6, -5, -4, -3, -2, -1, -1, 0,
	127, 126, 124, 118, 111, 102, 92, 81,
	70, 59, 48, 37, 27, 19, 11, 5,
	0, 5, 11, 19, 27, 37, 48, 59,
	70, 81, 92, 102, 111, 118, 124, 126,
	0, 0, -1, -1, -2, -3, -4, -5,
	-6, -7, -8, -8, -8, -8, -6, -3,
};

static const u8 filter_cr_horiz_tap4[] = {
	0, -3, -6, -8, -8, -8, -8, -7,
	-6, -5, -4, -3, -2, -1, -1, 0,
	127, 126, 124, 118, 111, 102, 92, 81,
	70, 59, 48, 37, 27, 19, 11, 5,
};

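/*
 * Write a coefficient table into consecutive video processor registers,
 * packing four consecutive bytes per 32-bit write with the first byte in
 * the most significant position.
 */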
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
	int reg_id, const u8 *data, unsigned int size)
{
	/* ensure 4-byte alignment */
	BUG_ON(size & 3);
	for (; size; size -= 4, reg_id += 4, data += 4) {
		u32 val = (data[0] << 24) | (data[1] << 16) |
			(data[2] << 8) | data[3];
		vp_write(mdev, reg_id, val);
	}
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof filter_y_vert_tap4);
	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
}

static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}

static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.vp_regs + reg_id)); \
} while (0)

	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);

#undef DUMPREG
}

void mxr_reg_dump(struct mxr_device *mdev)
{
	mxr_reg_mxr_dump(mdev);
	mxr_reg_vp_dump(mdev);
}