/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <mach/camera.h>
#include <linux/io.h>
#include <mach/clk.h>
#include <linux/clk.h>

#include <media/vcap_v4l2.h>
#include <media/vcap_fmt.h>
#include "vcap_vp.h"

static unsigned debug;

#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "VP: " fmt, ## arg); \
	} while (0)

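/*
 * Program the NR T2 luma/chroma base addresses from the given buffer;
 * the chroma base is placed width * height bytes past the luma base.
 */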
void config_nr_buffer(struct vcap_client_data *c_data,
			struct vcap_buffer *buf)
{
	struct vcap_dev *dev = c_data->dev;
	int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;

	writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + size, VCAP_VP_NR_T2_C_BASE_ADDR);
}

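/* Program the T2 input luma/chroma base addresses from the given buffer. */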
void config_in_buffer(struct vcap_client_data *c_data,
			struct vcap_buffer *buf)
{
	struct vcap_dev *dev = c_data->dev;
	int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;

	writel_relaxed(buf->paddr, VCAP_VP_T2_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + size, VCAP_VP_T2_C_BASE_ADDR);
}

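/* Program the output luma/chroma base addresses from the given buffer. */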
void config_out_buffer(struct vcap_client_data *c_data,
			struct vcap_buffer *buf)
{
	struct vcap_dev *dev = c_data->dev;
	int size;
	size = c_data->vp_out_fmt.height * c_data->vp_out_fmt.width;
	writel_relaxed(buf->paddr, VCAP_VP_OUT_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + size, VCAP_VP_OUT_C_BASE_ADDR);
}

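/*
 * Take the next buffer off each of the input and output active lists
 * and program them as the T2 input and the output for the next VP pass.
 * Returns -EAGAIN when either list is empty so the caller can retry
 * once more buffers have been queued.
 */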
int vp_setup_buffers(struct vcap_client_data *c_data)
{
	struct vp_action *vp_act;
	struct vcap_dev *dev;
	unsigned long flags = 0;

	if (!c_data->streaming)
		return -ENOEXEC;
	dev = c_data->dev;
	dprintk(2, "Start setup buffers\n");

	/* No need to verify vp_client is not NULL; the caller does so */
	vp_act = &dev->vp_client->vid_vp_action;

	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
	if (list_empty(&vp_act->in_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		dprintk(1, "%s: VP has no more input buffers\n",
			__func__);
		return -EAGAIN;
	}

	if (list_empty(&vp_act->out_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock,
			flags);
		dprintk(1, "%s: VP has no more output buffers\n",
			__func__);
		return -EAGAIN;
	}

	vp_act->bufT2 = list_entry(vp_act->in_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufT2->list);

	vp_act->bufOut = list_entry(vp_act->out_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufOut->list);
	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);

	config_in_buffer(c_data, vp_act->bufT2);
	config_out_buffer(c_data, vp_act->bufOut);
	return 0;
}

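/*
 * Work handler that dequeues finished buffers from the VP input queue
 * and requeues their ION backing (handle and physical address) on the
 * VC capture queue. Runs until the VP input queue is drained or the
 * client stops streaming.
 */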
static void mov_buf_to_vc(struct work_struct *work)
{
	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
	struct v4l2_buffer p;
	struct vb2_buffer *vb_vc;
	struct vcap_buffer *buf_vc;
	struct vb2_buffer *vb_vp;
	struct vcap_buffer *buf_vp;
	int rc;

	p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
	p.memory = V4L2_MEMORY_USERPTR;

	/* This loop exits when there are no buffers left */
	while (1) {
		if (!vp_work->cd->streaming)
			return;
		rc = vb2_dqbuf(&vp_work->cd->vp_in_vidq, &p, O_NONBLOCK);
		if (rc < 0)
			return;

		vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
		if (NULL == vb_vc) {
			dprintk(1, "%s: buffer is NULL\n", __func__);
			vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
			return;
		}
		buf_vc = container_of(vb_vc, struct vcap_buffer, vb);

		vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
		if (NULL == vb_vp) {
			dprintk(1, "%s: buffer is NULL\n", __func__);
			vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
			return;
		}
		buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
		buf_vc->ion_handle = buf_vp->ion_handle;
		buf_vc->paddr = buf_vp->paddr;
		buf_vp->ion_handle = NULL;
		buf_vp->paddr = 0;

		p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		/* This call should not fail */
		rc = vb2_qbuf(&vp_work->cd->vc_vidq, &p);
		if (rc < 0) {
			dprintk(1, "%s: qbuf to vc failed\n", __func__);
			buf_vp->ion_handle = buf_vc->ion_handle;
			buf_vp->paddr = buf_vc->paddr;
			buf_vc->ion_handle = NULL;
			buf_vc->paddr = 0;
			p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
			vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
		}
	}
}

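/*
 * Work handler run after a VP picture-done interrupt: waits for the VP
 * status bit, returns the completed buffers, rotates the Tm1/T0/T1/T2
 * (and NR) buffer chain, programs the next buffer pair and re-arms the
 * VP interrupt. If no buffers are available the interrupt stays masked
 * and vp_enabled is cleared.
 */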
static void vp_wq_fnc(struct work_struct *work)
{
	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
	struct vcap_dev *dev;
	struct vp_action *vp_act;
	uint32_t irq;
	int rc;
#ifndef TOP_FIELD_FIX
	bool top_field;
#endif

	if (vp_work && vp_work->cd && vp_work->cd->dev)
		dev = vp_work->cd->dev;
	else
		return;

	vp_act = &dev->vp_client->vid_vp_action;
	irq = vp_work->irq;

	rc = readl_relaxed(VCAP_OFFSET(0x048));
	while (!(rc & 0x00000100))
		rc = readl_relaxed(VCAP_OFFSET(0x048));

	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);

	/* Queue the done buffers */
	if (vp_act->vp_state == VP_NORMAL &&
			vp_act->bufNR.nr_pos != TM1_BUF) {
		vb2_buffer_done(&vp_act->bufTm1->vb, VB2_BUF_STATE_DONE);
		if (vp_work->cd->op_mode == VC_AND_VP_VCAP_OP)
			queue_work(dev->vcap_wq, &dev->vp_to_vc_work.work);
	}

	vb2_buffer_done(&vp_act->bufOut->vb, VB2_BUF_STATE_DONE);

	/* Cycle to next state */
	if (vp_act->vp_state != VP_NORMAL)
		vp_act->vp_state++;
#ifdef TOP_FIELD_FIX
	vp_act->top_field = !vp_act->top_field;
#endif

	/* Cycle buffers */
	if (vp_work->cd->vid_vp_action.nr_enabled) {
		if (vp_act->bufNR.nr_pos == TM1_BUF)
			vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;

		if (vp_act->bufNR.nr_pos != BUF_NOT_IN_USE)
			vp_act->bufNR.nr_pos++;

		vp_act->bufTm1 = vp_act->bufT0;
		vp_act->bufT0 = vp_act->bufT1;
		vp_act->bufT1 = vp_act->bufNRT2;
		vp_act->bufNRT2 = vp_act->bufT2;
		config_nr_buffer(vp_work->cd, vp_act->bufNRT2);
	} else {
		vp_act->bufTm1 = vp_act->bufT0;
		vp_act->bufT0 = vp_act->bufT1;
		vp_act->bufT1 = vp_act->bufT2;
	}

	rc = vp_setup_buffers(vp_work->cd);
	if (rc < 0) {
		/* setup_buf failed because we are waiting for buffers */
		writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
		writel_iowmb(irq, VCAP_VP_INT_CLEAR);
		atomic_set(&dev->vp_enabled, 0);
		return;
	}

	/* Config VP */
#ifndef TOP_FIELD_FIX
	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
		top_field = 1;
	else
		top_field = 0;
#endif

#ifdef TOP_FIELD_FIX
	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
	writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
#else
	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
	writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
#endif
	enable_irq(dev->vpirq->start);
	writel_iowmb(irq, VCAP_VP_INT_CLEAR);
}

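/*
 * VP interrupt handler: on a picture-done interrupt, schedule vp_wq_fnc()
 * on the VCAP workqueue and keep the VP IRQ disabled until the work item
 * re-enables it.
 */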
irqreturn_t vp_handler(struct vcap_dev *dev)
{
	struct vcap_client_data *c_data;
	struct vp_action *vp_act;
	uint32_t irq;
	int rc;

	irq = readl_relaxed(VCAP_VP_INT_STATUS);

	dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
	if (!(irq & VP_PIC_DONE)) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		pr_err("VP IRQ shows some error\n");
		return IRQ_HANDLED;
	}

	if (dev->vp_client == NULL) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		pr_err("VP: There is no active vp client\n");
		return IRQ_HANDLED;
	}

	vp_act = &dev->vp_client->vid_vp_action;
	c_data = dev->vp_client;

	if (vp_act->vp_state == VP_UNKNOWN) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		pr_err("%s: VP is in an unknown state\n",
				__func__);
		return IRQ_HANDLED;
	}

	INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
	dev->vp_work.cd = c_data;
	dev->vp_work.irq = irq;
	rc = queue_work(dev->vcap_wq, &dev->vp_work.work);

	disable_irq_nosync(dev->vpirq->start);
	return IRQ_HANDLED;
}

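/* Stop the VP: flush pending work, disable its IRQ and software-reset it. */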
void vp_stop_capture(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;

	writel_iowmb(0x00000000, VCAP_VP_CTRL);
	flush_workqueue(dev->vcap_wq);

	if (atomic_read(&dev->vp_enabled) == 1)
		disable_irq(dev->vpirq->start);

	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
}

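/*
 * Software-reset the VP and load the static film-mode, noise-reduction
 * and spatial/temporal filter configuration, then program the output
 * frame size for this client.
 */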
int config_vp_format(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;

	INIT_WORK(&dev->vp_to_vc_work.work, mov_buf_to_vc);
	dev->vp_to_vc_work.cd = c_data;

	/* SW restart VP */
	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);

	/* Film Mode related settings */
	writel_iowmb(0x00000000, VCAP_VP_FILM_PROJECTION_T0);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PROJECTION_T2);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MAX_PROJ);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MIN_PROJ);
	writel_relaxed(0x00000000, VCAP_VP_FILM_SEQUENCE_HIST);
	writel_relaxed(0x00000000, VCAP_VP_FILM_MODE_STATE);

	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
	writel_relaxed(0x00000010, VCAP_VP_REDUCT_AVG_MOTION);
	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_LUMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_CHROMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_LUMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_CHROMA);
	writel_relaxed(0x00000000, VCAP_VP_BAL_AVG_BLEND);
	writel_relaxed(0x00000000, VCAP_VP_VMOTION_HIST);
	writel_relaxed(0x05047D19, VCAP_VP_FILM_ANALYSIS_CONFIG);
	writel_relaxed(0x20260200, VCAP_VP_FILM_STATE_CONFIG);
	writel_relaxed(0x23A60114, VCAP_VP_FVM_CONFIG);
	writel_relaxed(0x03043210, VCAP_VP_FILM_ANALYSIS_CONFIG2);
	writel_relaxed(0x04DB7A51, VCAP_VP_MIXED_ANALYSIS_CONFIG);
	writel_relaxed(0x14224916, VCAP_VP_SPATIAL_CONFIG);
	writel_relaxed(0x83270400, VCAP_VP_SPATIAL_CONFIG2);
	writel_relaxed(0x0F000F92, VCAP_VP_SPATIAL_CONFIG3);
	writel_relaxed(0x00000000, VCAP_VP_TEMPORAL_CONFIG);
	writel_relaxed(0x00000000, VCAP_VP_PIXEL_DIFF_CONFIG);
	writel_relaxed(0x0C090511, VCAP_VP_H_FREQ_CONFIG);
	writel_relaxed(0x0A000000, VCAP_VP_NR_CONFIG);
	writel_relaxed(0x008F4149, VCAP_VP_NR_LUMA_CONFIG);
	writel_relaxed(0x008F4149, VCAP_VP_NR_CHROMA_CONFIG);
	writel_relaxed(0x43C0FD0C, VCAP_VP_BAL_CONFIG);
	writel_relaxed(0x00000255, VCAP_VP_BAL_MOTION_CONFIG);
	writel_relaxed(0x24154252, VCAP_VP_BAL_LIGHT_COMB);
	writel_relaxed(0x10024414, VCAP_VP_BAL_VMOTION_CONFIG);
	writel_relaxed(0x00000002, VCAP_VP_NR_CONFIG2);
	writel_relaxed((c_data->vp_out_fmt.height - 1) << 16 |
			(c_data->vp_out_fmt.width - 1), VCAP_VP_FRAME_SIZE);
	writel_relaxed(0x00000000, VCAP_VP_SPLIT_SCRN_CTRL);

	return 0;
}

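/*
 * Allocate the motion estimation buffer (16 bytes per 64x8 pixel block
 * of the output frame) and program its physical address into the VP.
 */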
int init_motion_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	void *buf;
	unsigned long motion_base_addr;
	uint32_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
		((c_data->vp_out_fmt.height + 7) >> 3) * 16;

	if (c_data->vid_vp_action.bufMotion) {
		pr_err("Motion buffer has already been created\n");
		return -ENOEXEC;
	}

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	c_data->vid_vp_action.bufMotion = buf;
	motion_base_addr = virt_to_phys(buf);
	writel_iowmb(motion_base_addr, VCAP_VP_MOTION_EST_ADDR);
	return 0;
}

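/* Clear the motion estimation address in the VP and free the buffer. */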
void deinit_motion_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	void *buf;

	if (!c_data->vid_vp_action.bufMotion) {
		dprintk(1, "Motion buffer has not been created\n");
		return;
	}

	buf = c_data->vid_vp_action.bufMotion;

	writel_iowmb(0x00000000, VCAP_VP_MOTION_EST_ADDR);
	c_data->vid_vp_action.bufMotion = NULL;
	kfree(buf);
	return;
}

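/*
 * Allocate the noise-reduction reference buffer (a full extra frame for
 * NV16, 1.5x the luma size otherwise), set the NR control bits in
 * VP_NR_CONFIG2 and program the buffer as the initial NR T2 reference.
 */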
int init_nr_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	struct nr_buffer *buf;
	uint32_t frame_size, tot_size, rc;

	if (c_data->vid_vp_action.bufNR.vaddr) {
		pr_err("NR buffer has already been created\n");
		return -ENOEXEC;
	}
	buf = &c_data->vid_vp_action.bufNR;

	frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
		tot_size = frame_size * 2;
	else
		tot_size = frame_size / 2 * 3;

	buf->vaddr = kzalloc(tot_size, GFP_KERNEL);
	if (!buf->vaddr)
		return -ENOMEM;

	buf->paddr = virt_to_phys(buf->vaddr);
	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
	rc |= 0x02D00001;
	writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
	writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + frame_size, VCAP_VP_NR_T2_C_BASE_ADDR);
	buf->nr_pos = NRT2_BUF;
	return 0;
}

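/* Clear the NR control bits and free the noise-reduction buffer. */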
void deinit_nr_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	struct nr_buffer *buf;
	uint32_t rc;

	if (!c_data->vid_vp_action.bufNR.vaddr) {
		pr_err("NR buffer has not been created\n");
		return;
	}

	buf = &c_data->vid_vp_action.bufNR;

	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
	rc &= ~0x02D00001;
	writel_relaxed(rc, VCAP_VP_NR_CONFIG2);

	kfree(buf->vaddr);
	buf->paddr = 0;
	buf->vaddr = NULL;
	return;
}

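/*
 * Start the first VP pass for a streaming client: claim two input
 * buffers (T1 and T2) and one output buffer, program the buffer
 * addresses and the input/output geometry, then enable the VP
 * interrupt and start the hardware.
 */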
int kickoff_vp(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev;
	struct vp_action *vp_act;
	unsigned long flags = 0;
	unsigned int chroma_fmt = 0;
	int size;
#ifndef TOP_FIELD_FIX
	bool top_field;
#endif

	if (!c_data->streaming)
		return -ENOEXEC;

	dev = c_data->dev;
	dprintk(2, "Start Kickoff\n");

	if (dev->vp_client == NULL) {
		pr_err("No active vp client\n");
		return -ENODEV;
	}
	vp_act = &dev->vp_client->vid_vp_action;

	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
	if (list_empty(&vp_act->in_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		pr_err("%s: VP has no more input buffers\n",
			__func__);
		return -EAGAIN;
	}

	vp_act->bufT1 = list_entry(vp_act->in_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufT1->list);

	if (list_empty(&vp_act->in_active)) {
		list_add(&vp_act->bufT1->list, &vp_act->in_active);
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		pr_err("%s: VP has no more input buffers\n",
			__func__);
		return -EAGAIN;
	}

	vp_act->bufT2 = list_entry(vp_act->in_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufT2->list);

	if (list_empty(&vp_act->out_active)) {
		list_add(&vp_act->bufT2->list, &vp_act->in_active);
		list_add(&vp_act->bufT1->list, &vp_act->in_active);
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		pr_err("%s: VP has no more output buffers\n",
			__func__);
		return -EAGAIN;
	}

	vp_act->bufOut = list_entry(vp_act->out_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufOut->list);
	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);

	size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
	writel_relaxed(vp_act->bufT1->paddr, VCAP_VP_T1_Y_BASE_ADDR);
	writel_relaxed(vp_act->bufT1->paddr + size, VCAP_VP_T1_C_BASE_ADDR);

	config_in_buffer(c_data, vp_act->bufT2);
	config_out_buffer(c_data, vp_act->bufOut);

	/* Config VP */
	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
		chroma_fmt = 1;
	writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
			chroma_fmt << 11 | 0x2 << 4, VCAP_VP_IN_CONFIG);

	chroma_fmt = 0;
	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
		chroma_fmt = 1;

	writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
			chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);

	/* Enable Interrupt */
#ifdef TOP_FIELD_FIX
	vp_act->top_field = 1;
#else
	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
		top_field = 1;
	else
		top_field = 0;
#endif
	vp_act->vp_state = VP_FRAME2;
	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
#ifdef TOP_FIELD_FIX
	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
	writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
#else
	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
	writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
#endif
	atomic_set(&c_data->dev->vp_enabled, 1);
	enable_irq(dev->vpirq->start);
	return 0;
}

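/*
 * Restart the VP after it stopped for lack of buffers: set up the next
 * input/output buffer pair, re-enable the VP interrupt and kick the
 * hardware again.
 */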
int continue_vp(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev;
	struct vp_action *vp_act;
	int rc;
#ifndef TOP_FIELD_FIX
	bool top_field;
#endif

	dprintk(2, "Start Continue\n");
	dev = c_data->dev;

	if (dev->vp_client == NULL) {
		pr_err("No active vp client\n");
		return -ENODEV;
	}
	vp_act = &dev->vp_client->vid_vp_action;

	if (vp_act->vp_state == VP_UNKNOWN) {
		pr_err("%s: VP is in an unknown state\n",
				__func__);
		return -EAGAIN;
	}

	rc = vp_setup_buffers(c_data);
	if (rc < 0)
		return rc;

#ifndef TOP_FIELD_FIX
	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
		top_field = 1;
	else
		top_field = 0;
#endif

	/* Config VP & Enable Interrupt */
	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
#ifdef TOP_FIELD_FIX
	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
	writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
#else
	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
	writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
#endif

	atomic_set(&c_data->dev->vp_enabled, 1);
	enable_irq(dev->vpirq->start);
	return 0;
}