/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <mach/camera.h>
#include <linux/io.h>
#include <mach/clk.h>
#include <linux/clk.h>

#include <media/v4l2-event.h>
#include <media/vcap_v4l2.h>
#include <media/vcap_fmt.h>
#include "vcap_vp.h"
27
28static unsigned debug;
29
30#define dprintk(level, fmt, arg...) \
31 do { \
32 if (debug >= level) \
33 printk(KERN_DEBUG "VP: " fmt, ## arg); \
34 } while (0)
35
36void config_nr_buffer(struct vcap_client_data *c_data,
37 struct vcap_buffer *buf)
38{
39 struct vcap_dev *dev = c_data->dev;
40 int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
41
42 writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
43 writel_relaxed(buf->paddr + size, VCAP_VP_NR_T2_C_BASE_ADDR);
44}
45
46void config_in_buffer(struct vcap_client_data *c_data,
47 struct vcap_buffer *buf)
48{
49 struct vcap_dev *dev = c_data->dev;
50 int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
51
52 writel_relaxed(buf->paddr, VCAP_VP_T2_Y_BASE_ADDR);
53 writel_relaxed(buf->paddr + size, VCAP_VP_T2_C_BASE_ADDR);
54}
55
56void config_out_buffer(struct vcap_client_data *c_data,
57 struct vcap_buffer *buf)
58{
59 struct vcap_dev *dev = c_data->dev;
60 int size;
61 size = c_data->vp_out_fmt.height * c_data->vp_out_fmt.width;
62 writel_relaxed(buf->paddr, VCAP_VP_OUT_Y_BASE_ADDR);
63 writel_relaxed(buf->paddr + size, VCAP_VP_OUT_C_BASE_ADDR);
64}
65
66int vp_setup_buffers(struct vcap_client_data *c_data)
67{
68 struct vp_action *vp_act;
69 struct vcap_dev *dev;
70 unsigned long flags = 0;
71
72 if (!c_data->streaming)
73 return -ENOEXEC;
74 dev = c_data->dev;
75 dprintk(2, "Start setup buffers\n");
76
77 /* No need to verify vp_client is not NULL caller does so */
78 vp_act = &dev->vp_client->vid_vp_action;
79
80 spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
81 if (list_empty(&vp_act->in_active)) {
82 spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
83 dprintk(1, "%s: VP We have no more input buffers\n",
84 __func__);
85 return -EAGAIN;
86 }
87
88 if (list_empty(&vp_act->out_active)) {
89 spin_unlock_irqrestore(&dev->vp_client->cap_slock,
90 flags);
91 dprintk(1, "%s: VP We have no more output buffers\n",
92 __func__);
93 return -EAGAIN;
94 }
95
96 vp_act->bufT2 = list_entry(vp_act->in_active.next,
97 struct vcap_buffer, list);
98 list_del(&vp_act->bufT2->list);
99
100 vp_act->bufOut = list_entry(vp_act->out_active.next,
101 struct vcap_buffer, list);
102 list_del(&vp_act->bufOut->list);
103 spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
104
105 config_in_buffer(c_data, vp_act->bufT2);
106 config_out_buffer(c_data, vp_act->bufOut);
107 return 0;
108}
109
110static void mov_buf_to_vc(struct work_struct *work)
111{
112 struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
113 struct v4l2_buffer p;
114 struct vb2_buffer *vb_vc;
115 struct vcap_buffer *buf_vc;
116 struct vb2_buffer *vb_vp;
117 struct vcap_buffer *buf_vp;
118 int rc;
119
120 p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
121 p.memory = V4L2_MEMORY_USERPTR;
122
123 /* This loop exits when there is no more buffers left */
124 while (1) {
125 if (!vp_work->cd->streaming)
126 return;
127 rc = vb2_dqbuf(&vp_work->cd->vp_in_vidq, &p, O_NONBLOCK);
128 if (rc < 0)
129 return;
130
131 vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
132 if (NULL == vb_vc) {
133 dprintk(1, "%s: buffer is NULL\n", __func__);
134 vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
135 return;
136 }
137 buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
138
139 vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
140 if (NULL == vb_vp) {
141 dprintk(1, "%s: buffer is NULL\n", __func__);
142 vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
143 return;
144 }
145 buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
146 buf_vc->ion_handle = buf_vp->ion_handle;
147 buf_vc->paddr = buf_vp->paddr;
148 buf_vp->ion_handle = NULL;
149 buf_vp->paddr = 0;
150
151 p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
152 /* This call should not fail */
153 rc = vb2_qbuf(&vp_work->cd->vc_vidq, &p);
154 if (rc < 0) {
155 dprintk(1, "%s: qbuf to vc failed\n", __func__);
156 buf_vp->ion_handle = buf_vc->ion_handle;
157 buf_vp->paddr = buf_vc->paddr;
158 buf_vc->ion_handle = NULL;
159 buf_vc->paddr = 0;
160 p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
161 vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
162 }
163 }
164}
165
166static void vp_wq_fnc(struct work_struct *work)
167{
168 struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
169 struct vcap_dev *dev;
170 struct vp_action *vp_act;
171 uint32_t irq;
172 int rc;
173#ifndef TOP_FIELD_FIX
174 bool top_field;
175#endif
176
177 if (vp_work && vp_work->cd && vp_work->cd->dev)
178 dev = vp_work->cd->dev;
179 else
180 return;
181
182 vp_act = &dev->vp_client->vid_vp_action;
183 irq = vp_work->irq;
184
185 rc = readl_relaxed(VCAP_OFFSET(0x048));
186 while (!(rc & 0x00000100))
187 rc = readl_relaxed(VCAP_OFFSET(0x048));
188
189 writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
190 writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
191
192 /* Queue the done buffers */
193 if (vp_act->vp_state == VP_NORMAL &&
194 vp_act->bufNR.nr_pos != TM1_BUF) {
195 vb2_buffer_done(&vp_act->bufTm1->vb, VB2_BUF_STATE_DONE);
196 if (vp_work->cd->op_mode == VC_AND_VP_VCAP_OP)
197 queue_work(dev->vcap_wq, &dev->vp_to_vc_work.work);
198 }
199
200 vb2_buffer_done(&vp_act->bufOut->vb, VB2_BUF_STATE_DONE);
201
202 /* Cycle to next state */
203 if (vp_act->vp_state != VP_NORMAL)
204 vp_act->vp_state++;
205#ifdef TOP_FIELD_FIX
206 vp_act->top_field = !vp_act->top_field;
207#endif
208
209 /* Cycle Buffers*/
210 if (vp_work->cd->vid_vp_action.nr_enabled) {
211 if (vp_act->bufNR.nr_pos == TM1_BUF)
212 vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
213
214 if (vp_act->bufNR.nr_pos != BUF_NOT_IN_USE)
215 vp_act->bufNR.nr_pos++;
216
217 vp_act->bufTm1 = vp_act->bufT0;
218 vp_act->bufT0 = vp_act->bufT1;
219 vp_act->bufT1 = vp_act->bufNRT2;
220 vp_act->bufNRT2 = vp_act->bufT2;
221 config_nr_buffer(vp_work->cd, vp_act->bufNRT2);
222 } else {
223 vp_act->bufTm1 = vp_act->bufT0;
224 vp_act->bufT0 = vp_act->bufT1;
225 vp_act->bufT1 = vp_act->bufT2;
226 }
227
228 rc = vp_setup_buffers(vp_work->cd);
229 if (rc < 0) {
230 /* setup_buf failed because we are waiting for buffers */
231 writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
232 writel_iowmb(irq, VCAP_VP_INT_CLEAR);
233 atomic_set(&dev->vp_enabled, 0);
234 return;
235 }
236
237 /* Config VP */
238#ifndef TOP_FIELD_FIX
239 if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
240 top_field = 1;
241#endif
242
243#ifdef TOP_FIELD_FIX
244 writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
245 writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
246#else
247 writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
248 writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
249#endif
250 enable_irq(dev->vpirq->start);
251 writel_iowmb(irq, VCAP_VP_INT_CLEAR);
252}
253
254irqreturn_t vp_handler(struct vcap_dev *dev)
255{
256 struct vcap_client_data *c_data;
257 struct vp_action *vp_act;
Terence Hampson98d11802012-06-06 18:18:43 -0400258 struct v4l2_event v4l2_evt;
Terence Hampsonaeb793e2012-05-11 11:41:16 -0400259 uint32_t irq;
260 int rc;
261
262 irq = readl_relaxed(VCAP_VP_INT_STATUS);
263
Terence Hampson98d11802012-06-06 18:18:43 -0400264 if (irq & 0x02000000) {
265 v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
266 VCAP_VP_REG_R_ERR_EVENT;
267 v4l2_event_queue(dev->vfd, &v4l2_evt);
268 }
269 if (irq & 0x01000000) {
270 v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
271 VCAP_VC_LINE_ERR_EVENT;
272 v4l2_event_queue(dev->vfd, &v4l2_evt);
273 }
274 if (irq & 0x00020000) {
275 v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
276 VCAP_VP_IN_HEIGHT_ERR_EVENT;
277 v4l2_event_queue(dev->vfd, &v4l2_evt);
278 }
279 if (irq & 0x00010000) {
280 v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
281 VCAP_VP_IN_WIDTH_ERR_EVENT;
282 v4l2_event_queue(dev->vfd, &v4l2_evt);
283 }
284
Terence Hampsonaeb793e2012-05-11 11:41:16 -0400285 dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
286 if (!irq & VP_PIC_DONE) {
287 writel_relaxed(irq, VCAP_VP_INT_CLEAR);
288 pr_err("VP IRQ shows some error\n");
289 return IRQ_HANDLED;
290 }
291
292 if (dev->vp_client == NULL) {
293 writel_relaxed(irq, VCAP_VP_INT_CLEAR);
294 pr_err("VC: There is no active vp client\n");
295 return IRQ_HANDLED;
296 }
297
298 vp_act = &dev->vp_client->vid_vp_action;
299 c_data = dev->vp_client;
300
301 if (vp_act->vp_state == VP_UNKNOWN) {
302 writel_relaxed(irq, VCAP_VP_INT_CLEAR);
303 pr_err("%s: VP is in an unknown state\n",
304 __func__);
305 return -EAGAIN;
306 }
307
308 INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
309 dev->vp_work.cd = c_data;
310 dev->vp_work.irq = irq;
311 rc = queue_work(dev->vcap_wq, &dev->vp_work.work);
312
313 disable_irq_nosync(dev->vpirq->start);
314 return IRQ_HANDLED;
315}
316
/*
 * Stop VP processing: halt the engine, drain pending work, mask the
 * irq if the VP was running, then pulse the software reset.
 * Order matters: the workqueue must be flushed before the irq is
 * disabled so an in-flight vp_wq_fnc can finish its enable_irq pairing.
 */
void vp_stop_capture(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;

	/* Clear VP_CTRL to stop the hardware before tearing down */
	writel_iowmb(0x00000000, VCAP_VP_CTRL);
	flush_workqueue(dev->vcap_wq);

	if (atomic_read(&dev->vp_enabled) == 1)
		disable_irq(dev->vpirq->start);

	/* Pulse software reset: assert then deassert */
	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
}
330
/*
 * One-time VP hardware configuration for the client's current format:
 * resets the block, clears film-mode/motion state, and loads the tuned
 * analysis/NR/BAL parameter registers, finishing with the frame size.
 *
 * The magic constants are hardware tuning values; they are written
 * verbatim from the vendor configuration and are order-sensitive only
 * in that the first write uses writel_iowmb (barrier) after reset.
 * Always returns 0.
 */
int config_vp_format(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;

	/* Prepare the deferred VP->VC buffer mover for this client */
	INIT_WORK(&dev->vp_to_vc_work.work, mov_buf_to_vc);
	dev->vp_to_vc_work.cd = c_data;

	/* SW restart VP */
	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);

	/* Film Mode related settings */
	writel_iowmb(0x00000000, VCAP_VP_FILM_PROJECTION_T0);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PROJECTION_T2);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MAX_PROJ);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MIN_PROJ);
	writel_relaxed(0x00000000, VCAP_VP_FILM_SEQUENCE_HIST);
	writel_relaxed(0x00000000, VCAP_VP_FILM_MODE_STATE);

	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
	writel_relaxed(0x00000010, VCAP_VP_REDUCT_AVG_MOTION);
	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_LUMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_CHROMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_LUMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_CHROMA);
	writel_relaxed(0x00000000, VCAP_VP_BAL_AVG_BLEND);
	writel_relaxed(0x00000000, VCAP_VP_VMOTION_HIST);
	writel_relaxed(0x05047D19, VCAP_VP_FILM_ANALYSIS_CONFIG);
	writel_relaxed(0x20260200, VCAP_VP_FILM_STATE_CONFIG);
	writel_relaxed(0x23A60114, VCAP_VP_FVM_CONFIG);
	writel_relaxed(0x03043210, VCAP_VP_FILM_ANALYSIS_CONFIG2);
	writel_relaxed(0x04DB7A51, VCAP_VP_MIXED_ANALYSIS_CONFIG);
	writel_relaxed(0x14224916, VCAP_VP_SPATIAL_CONFIG);
	writel_relaxed(0x83270400, VCAP_VP_SPATIAL_CONFIG2);
	writel_relaxed(0x0F000F92, VCAP_VP_SPATIAL_CONFIG3);
	writel_relaxed(0x00000000, VCAP_VP_TEMPORAL_CONFIG);
	writel_relaxed(0x00000000, VCAP_VP_PIXEL_DIFF_CONFIG);
	writel_relaxed(0x0C090511, VCAP_VP_H_FREQ_CONFIG);
	writel_relaxed(0x0A000000, VCAP_VP_NR_CONFIG);
	writel_relaxed(0x008F4149, VCAP_VP_NR_LUMA_CONFIG);
	writel_relaxed(0x008F4149, VCAP_VP_NR_CHROMA_CONFIG);
	writel_relaxed(0x43C0FD0C, VCAP_VP_BAL_CONFIG);
	writel_relaxed(0x00000255, VCAP_VP_BAL_MOTION_CONFIG);
	writel_relaxed(0x24154252, VCAP_VP_BAL_LIGHT_COMB);
	writel_relaxed(0x10024414, VCAP_VP_BAL_VMOTION_CONFIG);
	writel_relaxed(0x00000002, VCAP_VP_NR_CONFIG2);
	/* Frame size register packs (height-1) in the high half-word */
	writel_relaxed((c_data->vp_out_fmt.height-1)<<16 |
			(c_data->vp_out_fmt.width - 1), VCAP_VP_FRAME_SIZE);
	writel_relaxed(0x00000000, VCAP_VP_SPLIT_SCRN_CTRL);

	return 0;
}
384
385int init_motion_buf(struct vcap_client_data *c_data)
386{
387 struct vcap_dev *dev = c_data->dev;
388 void *buf;
389 unsigned long motion_base_addr;
390 uint32_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
391 ((c_data->vp_out_fmt.height + 7) >> 3) * 16;
392
393 if (c_data->vid_vp_action.bufMotion) {
394 pr_err("Motion buffer has already been created");
395 return -ENOEXEC;
396 }
397
398 buf = kzalloc(size, GFP_KERNEL);
399 if (!buf)
400 return -ENOMEM;
401
402 c_data->vid_vp_action.bufMotion = buf;
403 motion_base_addr = virt_to_phys(buf);
404 writel_iowmb(motion_base_addr, VCAP_VP_MOTION_EST_ADDR);
405 return 0;
406}
407
408void deinit_motion_buf(struct vcap_client_data *c_data)
409{
410 struct vcap_dev *dev = c_data->dev;
411 void *buf;
412
413 if (!c_data->vid_vp_action.bufMotion) {
414 dprintk(1, "Motion buffer has not been created");
415 return;
416 }
417
418 buf = c_data->vid_vp_action.bufMotion;
419
420 writel_iowmb(0x00000000, VCAP_VP_MOTION_EST_ADDR);
421 c_data->vid_vp_action.bufMotion = NULL;
422 kfree(buf);
423 return;
424}
425
426int init_nr_buf(struct vcap_client_data *c_data)
427{
428 struct vcap_dev *dev = c_data->dev;
429 struct nr_buffer *buf;
430 uint32_t frame_size, tot_size, rc;
431
432 if (c_data->vid_vp_action.bufNR.vaddr) {
433 pr_err("NR buffer has already been created");
434 return -ENOEXEC;
435 }
436 buf = &c_data->vid_vp_action.bufNR;
437
438 frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
439 if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
440 tot_size = frame_size * 2;
441 else
442 tot_size = frame_size / 2 * 3;
443
444 buf->vaddr = kzalloc(tot_size, GFP_KERNEL);
445 if (!buf)
446 return -ENOMEM;
447
448 buf->paddr = virt_to_phys(buf->vaddr);
449 rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
450 rc |= 0x02D00001;
451 writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
452 writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
453 writel_relaxed(buf->paddr + frame_size, VCAP_VP_NR_T2_C_BASE_ADDR);
454 buf->nr_pos = NRT2_BUF;
455 return 0;
456}
457
458void deinit_nr_buf(struct vcap_client_data *c_data)
459{
460 struct vcap_dev *dev = c_data->dev;
461 struct nr_buffer *buf;
462 uint32_t rc;
463
464 if (!c_data->vid_vp_action.bufNR.vaddr) {
465 pr_err("NR buffer has not been created");
466 return;
467 }
468
469 buf = &c_data->vid_vp_action.bufNR;
470
471 rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
472 rc &= !(0x02D00001);
473 writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
474
475 kfree(buf->vaddr);
476 buf->paddr = 0;
477 buf->vaddr = NULL;
478 return;
479}
480
481int kickoff_vp(struct vcap_client_data *c_data)
482{
483 struct vcap_dev *dev;
484 struct vp_action *vp_act;
485 unsigned long flags = 0;
486 unsigned int chroma_fmt = 0;
487 int size;
488#ifndef TOP_FIELD_FIX
489 bool top_field;
490#endif
491
492 if (!c_data->streaming)
493 return -ENOEXEC;
494
495 dev = c_data->dev;
496 dprintk(2, "Start Kickoff\n");
497
498 if (dev->vp_client == NULL) {
499 pr_err("No active vp client\n");
500 return -ENODEV;
501 }
502 vp_act = &dev->vp_client->vid_vp_action;
503
504 spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
505 if (list_empty(&vp_act->in_active)) {
506 spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
507 pr_err("%s: VP We have no more input buffers\n",
508 __func__);
509 return -EAGAIN;
510 }
511
512 vp_act->bufT1 = list_entry(vp_act->in_active.next,
513 struct vcap_buffer, list);
514 list_del(&vp_act->bufT1->list);
515
516 if (list_empty(&vp_act->in_active)) {
517 spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
518 list_add(&vp_act->bufT1->list, &vp_act->in_active);
519 pr_err("%s: VP We have no more input buffers\n",
520 __func__);
521 return -EAGAIN;
522 }
523
524 vp_act->bufT2 = list_entry(vp_act->in_active.next,
525 struct vcap_buffer, list);
526 list_del(&vp_act->bufT2->list);
527
528 if (list_empty(&vp_act->out_active)) {
529 spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
530 list_add(&vp_act->bufT2->list, &vp_act->in_active);
531 list_add(&vp_act->bufT1->list, &vp_act->in_active);
532 pr_err("%s: VP We have no more output buffers\n",
533 __func__);
534 return -EAGAIN;
535 }
536
537 vp_act->bufOut = list_entry(vp_act->out_active.next,
538 struct vcap_buffer, list);
539 list_del(&vp_act->bufOut->list);
540 spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
541
542 size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
543 writel_relaxed(vp_act->bufT1->paddr, VCAP_VP_T1_Y_BASE_ADDR);
544 writel_relaxed(vp_act->bufT1->paddr + size, VCAP_VP_T1_C_BASE_ADDR);
545
546 config_in_buffer(c_data, vp_act->bufT2);
547 config_out_buffer(c_data, vp_act->bufOut);
548
549 /* Config VP */
550 if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
551 chroma_fmt = 1;
552 writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
553 chroma_fmt << 11 | 0x2 << 4, VCAP_VP_IN_CONFIG);
554
555 chroma_fmt = 0;
556 if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
557 chroma_fmt = 1;
558
559 writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
560 chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
561
562 /* Enable Interrupt */
563#ifdef TOP_FIELD_FIX
564 vp_act->top_field = 1;
565#else
566 if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
567 top_field = 1;
568#endif
569 vp_act->vp_state = VP_FRAME2;
570 writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
571#ifdef TOP_FIELD_FIX
572 writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
573 writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
574#else
575 writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
576 writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
577#endif
578 atomic_set(&c_data->dev->vp_enabled, 1);
579 enable_irq(dev->vpirq->start);
580 return 0;
581}
582
583int continue_vp(struct vcap_client_data *c_data)
584{
585 struct vcap_dev *dev;
586 struct vp_action *vp_act;
587 int rc;
588#ifndef TOP_FIELD_FIX
589 bool top_field;
590#endif
591
592 dprintk(2, "Start Continue\n");
593 dev = c_data->dev;
594
595 if (dev->vp_client == NULL) {
596 pr_err("No active vp client\n");
597 return -ENODEV;
598 }
599 vp_act = &dev->vp_client->vid_vp_action;
600
601 if (vp_act->vp_state == VP_UNKNOWN) {
602 pr_err("%s: VP is in an unknown state\n",
603 __func__);
604 return -EAGAIN;
605 }
606
607 rc = vp_setup_buffers(c_data);
608 if (rc < 0)
609 return rc;
610
611#ifndef TOP_FIELD_FIX
612 if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
613 top_field = 1;
614#endif
615
616 /* Config VP & Enable Interrupt */
617 writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
618#ifdef TOP_FIELD_FIX
619 writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
620 writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
621#else
622 writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
623 writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
624#endif
625
626 atomic_set(&c_data->dev->vp_enabled, 1);
627 enable_irq(dev->vpirq->start);
628 return 0;
629}