/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <mach/camera.h>
#include <linux/io.h>
#include <mach/clk.h>
#include <linux/clk.h>

#include <media/v4l2-event.h>
#include <media/vcap_v4l2.h>
#include <media/vcap_fmt.h>
#include "vcap_vp.h"

static unsigned debug;

#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "VP: " fmt, ## arg); \
	} while (0)

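/*
 * Point the noise-reduction T2 luma/chroma base registers at the given
 * buffer; the chroma plane is placed one luma plane (width * height
 * bytes of the VP input format) past the start of the buffer.
 */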
void config_nr_buffer(struct vcap_client_data *c_data,
			struct vcap_buffer *buf)
{
	struct vcap_dev *dev = c_data->dev;
	int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;

	writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + size, VCAP_VP_NR_T2_C_BASE_ADDR);
}

void config_in_buffer(struct vcap_client_data *c_data,
			struct vcap_buffer *buf)
{
	struct vcap_dev *dev = c_data->dev;
	int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;

	writel_relaxed(buf->paddr, VCAP_VP_T2_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + size, VCAP_VP_T2_C_BASE_ADDR);
}

void config_out_buffer(struct vcap_client_data *c_data,
			struct vcap_buffer *buf)
{
	struct vcap_dev *dev = c_data->dev;
	int size;
	size = c_data->vp_out_fmt.height * c_data->vp_out_fmt.width;
	writel_relaxed(buf->paddr, VCAP_VP_OUT_Y_BASE_ADDR);
	writel_relaxed(buf->paddr + size, VCAP_VP_OUT_C_BASE_ADDR);
}

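/*
 * Pull the next input (T2) and output buffers off the client's active
 * lists and program them into the VP. Returns -EAGAIN when either list
 * is empty so the caller can retry once more buffers are queued.
 */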
int vp_setup_buffers(struct vcap_client_data *c_data)
{
	struct vp_action *vp_act;
	struct vcap_dev *dev;
	unsigned long flags = 0;

	if (!c_data->streaming)
		return -ENOEXEC;
	dev = c_data->dev;
	dprintk(2, "Start setup buffers\n");

	/* No need to verify that vp_client is not NULL; the caller does so */
	vp_act = &dev->vp_client->vid_vp_action;

	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
	if (list_empty(&vp_act->in_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		dprintk(1, "%s: VP We have no more input buffers\n",
			__func__);
		return -EAGAIN;
	}

	if (list_empty(&vp_act->out_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock,
			flags);
		dprintk(1, "%s: VP We have no more output buffers\n",
			__func__);
		return -EAGAIN;
	}

	vp_act->bufT2 = list_entry(vp_act->in_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufT2->list);

	vp_act->bufOut = list_entry(vp_act->out_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufOut->list);
	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);

	config_in_buffer(c_data, vp_act->bufT2);
	config_out_buffer(c_data, vp_act->bufOut);
	return 0;
}

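/*
 * Worker that recycles processed VP input buffers back to the VC capture
 * queue: dequeue from the VP input queue, hand the ion handle and
 * physical address over to the matching VC buffer, then queue that
 * buffer on the VC side. Loops until dequeue fails or streaming stops.
 */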
static void mov_buf_to_vc(struct work_struct *work)
{
	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
	struct v4l2_buffer p;
	struct vb2_buffer *vb_vc;
	struct vcap_buffer *buf_vc;
	struct vb2_buffer *vb_vp;
	struct vcap_buffer *buf_vp;
	int rc;

	p.memory = V4L2_MEMORY_USERPTR;

	/* This loop exits when there are no buffers left */
	while (1) {
		p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
		if (!vp_work->cd->streaming)
			return;
		rc = vcvp_dqbuf(&vp_work->cd->vp_in_vidq, &p);
		if (rc < 0)
			return;

		vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
		if (NULL == vb_vc) {
			dprintk(1, "%s: buffer is NULL\n", __func__);
			vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
			return;
		}
		buf_vc = container_of(vb_vc, struct vcap_buffer, vb);

		vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
		if (NULL == vb_vp) {
			dprintk(1, "%s: buffer is NULL\n", __func__);
			vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
			return;
		}
		buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
		buf_vc->ion_handle = buf_vp->ion_handle;
		buf_vc->paddr = buf_vp->paddr;
		buf_vp->ion_handle = NULL;
		buf_vp->paddr = 0;

		p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		/* This call should not fail */
		rc = vcvp_qbuf(&vp_work->cd->vc_vidq, &p);
		if (rc < 0) {
			dprintk(1, "%s: qbuf to vc failed\n", __func__);
			buf_vp->ion_handle = buf_vc->ion_handle;
			buf_vp->paddr = buf_vc->paddr;
			buf_vc->ion_handle = NULL;
			buf_vc->paddr = 0;
			p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
			vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
		}
	}
}

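/*
 * Write the client's manual noise-reduction parameters into the NR
 * config registers (auto mode leaves the registers untouched) and clear
 * the pending nr_update flag.
 */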
void update_nr_value(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	struct nr_param *par;
	par = &c_data->vid_vp_action.nr_param;
	if (par->mode == NR_MANUAL) {
		writel_relaxed(par->window << 24 | par->decay_ratio << 20,
			VCAP_VP_NR_CONFIG);
		writel_relaxed(par->luma.max_blend_ratio << 24 |
			par->luma.scale_diff_ratio << 12 |
			par->luma.diff_limit_ratio << 8 |
			par->luma.scale_motion_ratio << 4 |
			par->luma.blend_limit_ratio << 0,
			VCAP_VP_NR_LUMA_CONFIG);
		writel_relaxed(par->chroma.max_blend_ratio << 24 |
			par->chroma.scale_diff_ratio << 12 |
			par->chroma.diff_limit_ratio << 8 |
			par->chroma.scale_motion_ratio << 4 |
			par->chroma.blend_limit_ratio << 0,
			VCAP_VP_NR_CHROMA_CONFIG);
	}
	c_data->vid_vp_action.nr_update = false;
}

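/*
 * Workqueue handler run after each VP interrupt: busy-waits on a VP
 * status bit (offset 0x048), returns completed buffers to videobuf2,
 * rotates the Tm1/T0/T1/T2 (and NR) buffer chain, programs the next
 * buffer pair, and restarts the VP for the following field.
 */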
static void vp_wq_fnc(struct work_struct *work)
{
	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
	struct vcap_dev *dev;
	struct vp_action *vp_act;
	unsigned long flags = 0;
	uint32_t irq;
	int rc;
#ifndef TOP_FIELD_FIX
	bool top_field;
#endif

	if (vp_work && vp_work->cd && vp_work->cd->dev)
		dev = vp_work->cd->dev;
	else
		return;

	vp_act = &dev->vp_client->vid_vp_action;

	rc = readl_relaxed(VCAP_OFFSET(0x048));
	while (!(rc & 0x00000100))
		rc = readl_relaxed(VCAP_OFFSET(0x048));

	irq = readl_relaxed(VCAP_VP_INT_STATUS);

	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);

	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
	if (vp_act->nr_update == true)
		update_nr_value(dev->vp_client);
	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);

	/* Queue the done buffers */
	if (vp_act->vp_state == VP_NORMAL &&
			vp_act->bufNR.nr_pos != TM1_BUF) {
		vb2_buffer_done(&vp_act->bufTm1->vb, VB2_BUF_STATE_DONE);
		if (vp_work->cd->op_mode == VC_AND_VP_VCAP_OP)
			queue_work(dev->vcap_wq, &dev->vp_to_vc_work.work);
	}

	vb2_buffer_done(&vp_act->bufOut->vb, VB2_BUF_STATE_DONE);

	/* Cycle to next state */
	if (vp_act->vp_state != VP_NORMAL)
		vp_act->vp_state++;
#ifdef TOP_FIELD_FIX
	vp_act->top_field = !vp_act->top_field;
#endif

	/* Cycle Buffers*/
	if (vp_work->cd->vid_vp_action.nr_param.mode) {
		if (vp_act->bufNR.nr_pos == TM1_BUF)
			vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;

		if (vp_act->bufNR.nr_pos != BUF_NOT_IN_USE)
			vp_act->bufNR.nr_pos++;

		vp_act->bufTm1 = vp_act->bufT0;
		vp_act->bufT0 = vp_act->bufT1;
		vp_act->bufT1 = vp_act->bufNRT2;
		vp_act->bufNRT2 = vp_act->bufT2;
		config_nr_buffer(vp_work->cd, vp_act->bufNRT2);
	} else {
		vp_act->bufTm1 = vp_act->bufT0;
		vp_act->bufT0 = vp_act->bufT1;
		vp_act->bufT1 = vp_act->bufT2;
	}

	rc = vp_setup_buffers(vp_work->cd);
	if (rc < 0) {
		/* setup_buf failed because we are waiting for buffers */
		writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
		writel_iowmb(irq, VCAP_VP_INT_CLEAR);
		atomic_set(&dev->vp_enabled, 0);
		return;
	}

	/* Config VP */
#ifndef TOP_FIELD_FIX
	top_field = (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP);
#endif

#ifdef TOP_FIELD_FIX
	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
	writel_iowmb(0x00010000 | vp_act->top_field << 0, VCAP_VP_CTRL);
#else
	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
	writel_iowmb(0x00010000 | top_field, VCAP_VP_CTRL);
#endif
	enable_irq(dev->vpirq->start);
	writel_iowmb(irq, VCAP_VP_INT_CLEAR);
}

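/*
 * VP interrupt handler: completes the dummy-event handshake when one is
 * in flight, forwards error interrupts to user space as V4L2 events, and
 * on a picture-done/mode-change interrupt schedules vp_wq_fnc and masks
 * the VP IRQ until that work re-enables it.
 */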
irqreturn_t vp_handler(struct vcap_dev *dev)
{
	struct vcap_client_data *c_data;
	struct vp_action *vp_act;
	struct v4l2_event v4l2_evt;
	uint32_t irq;
	int rc;

	irq = readl_relaxed(VCAP_VP_INT_STATUS);
	if (dev->vp_dummy_event == true) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		dev->vp_dummy_complete = true;
		wake_up(&dev->vp_dummy_waitq);
		return IRQ_HANDLED;
	}

	if (irq & 0x02000000) {
		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
			VCAP_VP_REG_R_ERR_EVENT;
		v4l2_event_queue(dev->vfd, &v4l2_evt);
	}
	if (irq & 0x01000000) {
		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
			VCAP_VC_LINE_ERR_EVENT;
		v4l2_event_queue(dev->vfd, &v4l2_evt);
	}
	if (irq & 0x00020000) {
		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
			VCAP_VP_IN_HEIGHT_ERR_EVENT;
		v4l2_event_queue(dev->vfd, &v4l2_evt);
	}
	if (irq & 0x00010000) {
		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
			VCAP_VP_IN_WIDTH_ERR_EVENT;
		v4l2_event_queue(dev->vfd, &v4l2_evt);
	}

	dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
	if (!(irq & (VP_PIC_DONE | VP_MODE_CHANGE))) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		pr_err("VP IRQ shows some error\n");
		return IRQ_HANDLED;
	}

	if (dev->vp_client == NULL) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		pr_err("VP: There is no active vp client\n");
		return IRQ_HANDLED;
	}

	vp_act = &dev->vp_client->vid_vp_action;
	c_data = dev->vp_client;

	if (vp_act->vp_state == VP_UNKNOWN) {
		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
		pr_err("%s: VP is in an unknown state\n",
				__func__);
		return IRQ_HANDLED;
	}

	INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
	dev->vp_work.cd = c_data;
	rc = queue_work(dev->vcap_wq, &dev->vp_work.work);

	disable_irq_nosync(dev->vpirq->start);
	return IRQ_HANDLED;
}

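/*
 * Stop VP processing: halt the block, flush any queued VP work, disable
 * the VP IRQ if the VP was enabled, and apply a software reset.
 */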
void vp_stop_capture(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;

	writel_iowmb(0x00000000, VCAP_VP_CTRL);
	flush_workqueue(dev->vcap_wq);

	if (atomic_read(&dev->vp_enabled) == 1)
		disable_irq(dev->vpirq->start);

	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
}

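/*
 * Software-reset the VP and program the static film-mode, motion,
 * spatial and noise-reduction defaults along with the client's output
 * frame size.
 */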
int config_vp_format(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;

	INIT_WORK(&dev->vp_to_vc_work.work, mov_buf_to_vc);
	dev->vp_to_vc_work.cd = c_data;

	/* SW restart VP */
	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);

	/* Film Mode related settings */
	writel_iowmb(0x00000000, VCAP_VP_FILM_PROJECTION_T0);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PROJECTION_T2);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MAX_PROJ);
	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MIN_PROJ);
	writel_relaxed(0x00000000, VCAP_VP_FILM_SEQUENCE_HIST);
	writel_relaxed(0x00000000, VCAP_VP_FILM_MODE_STATE);

	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
	writel_relaxed(0x00000010, VCAP_VP_REDUCT_AVG_MOTION);
	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_LUMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_CHROMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_LUMA);
	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_CHROMA);
	writel_relaxed(0x00000000, VCAP_VP_BAL_AVG_BLEND);
	writel_relaxed(0x00000000, VCAP_VP_VMOTION_HIST);
	writel_relaxed(0x05047D19, VCAP_VP_FILM_ANALYSIS_CONFIG);
	writel_relaxed(0x20260200, VCAP_VP_FILM_STATE_CONFIG);
	writel_relaxed(0x23A60114, VCAP_VP_FVM_CONFIG);
	writel_relaxed(0x03043210, VCAP_VP_FILM_ANALYSIS_CONFIG2);
	writel_relaxed(0x04DB7A51, VCAP_VP_MIXED_ANALYSIS_CONFIG);
	writel_relaxed(0x14224916, VCAP_VP_SPATIAL_CONFIG);
	writel_relaxed(0x83270400, VCAP_VP_SPATIAL_CONFIG2);
	writel_relaxed(0x0F000F92, VCAP_VP_SPATIAL_CONFIG3);
	writel_relaxed(0x00000000, VCAP_VP_TEMPORAL_CONFIG);
	writel_relaxed(0x00000000, VCAP_VP_PIXEL_DIFF_CONFIG);
	writel_relaxed(0x0C090511, VCAP_VP_H_FREQ_CONFIG);
	writel_relaxed(0x0A000000, VCAP_VP_NR_CONFIG);
	writel_relaxed(0x008F4149, VCAP_VP_NR_LUMA_CONFIG);
	writel_relaxed(0x008F4149, VCAP_VP_NR_CHROMA_CONFIG);
	writel_relaxed(0x43C0FD0C, VCAP_VP_BAL_CONFIG);
	writel_relaxed(0x00000255, VCAP_VP_BAL_MOTION_CONFIG);
	writel_relaxed(0x24154252, VCAP_VP_BAL_LIGHT_COMB);
	writel_relaxed(0x10024414, VCAP_VP_BAL_VMOTION_CONFIG);
	writel_relaxed(0x00000002, VCAP_VP_NR_CONFIG2);
	writel_relaxed((c_data->vp_out_fmt.height-1)<<16 |
			(c_data->vp_out_fmt.width - 1), VCAP_VP_FRAME_SIZE);
	writel_relaxed(0x00000000, VCAP_VP_SPLIT_SCRN_CTRL);

	return 0;
}

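/*
 * Allocate, zero and register the motion-estimation scratch buffer with
 * the VP. Fails with -ENOEXEC if one already exists.
 */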
int init_motion_buf(struct vcap_client_data *c_data)
{
	int rc;
	struct vcap_dev *dev = c_data->dev;
	struct ion_handle *handle = NULL;
	unsigned long paddr, ionflag = 0;
	void *vaddr;
	size_t len;
	size_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
		((c_data->vp_out_fmt.height + 7) >> 3) * 16;

	if (c_data->vid_vp_action.motionHandle) {
		pr_err("Motion buffer has already been created");
		return -ENOEXEC;
	}

	handle = ion_alloc(dev->ion_client, size, SZ_4K,
			ION_HEAP(ION_CP_MM_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: ion_alloc failed\n", __func__);
		return -ENOMEM;
	}
	rc = ion_phys(dev->ion_client, handle, &paddr, &len);
	if (rc < 0) {
		pr_err("%s: ion_phys failed\n", __func__);
		ion_free(dev->ion_client, handle);
		return rc;
	}

	rc = ion_handle_get_flags(dev->ion_client, handle, &ionflag);
	if (rc) {
		pr_err("%s: get flags ion handle failed\n", __func__);
		ion_free(dev->ion_client, handle);
		return rc;
	}

	vaddr = ion_map_kernel(dev->ion_client, handle, ionflag);
	if (IS_ERR(vaddr)) {
		pr_err("%s: Map motion buffer failed\n", __func__);
		ion_free(dev->ion_client, handle);
		rc = -ENOMEM;
		return rc;
	}

	memset(vaddr, 0, size);
	c_data->vid_vp_action.motionHandle = handle;

	vaddr = NULL;
	ion_unmap_kernel(dev->ion_client, handle);

	writel_iowmb(paddr, VCAP_VP_MOTION_EST_ADDR);
	return 0;
}

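/* Clear the motion-estimation address register and free the motion buffer. */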
void deinit_motion_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	if (!c_data->vid_vp_action.motionHandle) {
		pr_err("Motion buffer has not been created");
		return;
	}

	writel_iowmb(0x00000000, VCAP_VP_MOTION_EST_ADDR);
	ion_free(dev->ion_client, c_data->vid_vp_action.motionHandle);
	c_data->vid_vp_action.motionHandle = NULL;
	return;
}

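/*
 * Allocate the noise-reduction scratch frame (sized from the VP input
 * pixel format), apply the current NR parameters, and point the NR T2
 * registers at it.
 */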
int init_nr_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	struct ion_handle *handle = NULL;
	size_t frame_size, tot_size, len;
	unsigned long paddr;
	int rc;

	if (c_data->vid_vp_action.bufNR.nr_handle) {
		pr_err("NR buffer has already been created");
		return -ENOEXEC;
	}

	frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
		tot_size = frame_size * 2;
	else
		tot_size = frame_size / 2 * 3;

	handle = ion_alloc(dev->ion_client, tot_size, SZ_4K,
			ION_HEAP(ION_CP_MM_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: ion_alloc failed\n", __func__);
		return -ENOMEM;
	}

	rc = ion_phys(dev->ion_client, handle, &paddr, &len);
	if (rc < 0) {
		pr_err("%s: ion_phys failed\n", __func__);
		ion_free(dev->ion_client, handle);
		return rc;
	}

	c_data->vid_vp_action.bufNR.nr_handle = handle;
	update_nr_value(c_data);

	c_data->vid_vp_action.bufNR.paddr = paddr;
	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
	rc |= (((c_data->vp_out_fmt.width / 16) << 20) | 0x1);
	writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
	writel_relaxed(paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
	writel_relaxed(paddr + frame_size, VCAP_VP_NR_T2_C_BASE_ADDR);
	c_data->vid_vp_action.bufNR.nr_pos = NRT2_BUF;
	return 0;
}

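/* Disable NR in VCAP_VP_NR_CONFIG2 and free the NR scratch buffer. */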
void deinit_nr_buf(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	struct nr_buffer *buf;
	uint32_t rc;

	if (!c_data->vid_vp_action.bufNR.nr_handle) {
		pr_err("NR buffer has not been created");
		return;
	}
	buf = &c_data->vid_vp_action.bufNR;

	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
	rc &= ~(0x0FF00001);
	writel_relaxed(rc, VCAP_VP_NR_CONFIG2);

	ion_free(dev->ion_client, buf->nr_handle);
	buf->nr_handle = NULL;
	buf->paddr = 0;
	return;
}

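/*
 * Validate user-supplied manual NR parameters; returns -EINVAL if any
 * value exceeds its hardware maximum.
 */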
int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param)
{
	if (param->mode != NR_MANUAL)
		return 0;

	/* Verify values are in range */
	if (param->window > VP_NR_MAX_WINDOW)
		return -EINVAL;
	if (param->luma.max_blend_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->luma.scale_diff_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->luma.diff_limit_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->luma.scale_motion_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->luma.blend_limit_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->chroma.max_blend_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->chroma.scale_diff_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->chroma.diff_limit_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->chroma.scale_motion_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	if (param->chroma.blend_limit_ratio > VP_NR_MAX_RATIO)
		return -EINVAL;
	return 0;
}

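/* Read the current NR settings back from the hardware into *param. */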
void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param)
{
	struct vcap_dev *dev = c_data->dev;
	uint32_t rc;
	rc = readl_relaxed(VCAP_VP_NR_CONFIG);
	param->window = BITS_VALUE(rc, 24, 4);
	param->decay_ratio = BITS_VALUE(rc, 20, 3);

	rc = readl_relaxed(VCAP_VP_NR_LUMA_CONFIG);
	param->luma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
	param->luma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
	param->luma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
	param->luma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
	param->luma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);

	rc = readl_relaxed(VCAP_VP_NR_CHROMA_CONFIG);
	param->chroma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
	param->chroma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
	param->chroma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
	param->chroma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
	param->chroma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
}

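/* Fill *param with the default noise-reduction settings. */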
void s_default_nr_val(struct nr_param *param)
{
	param->window = 10;
	param->decay_ratio = 0;
	param->luma.max_blend_ratio = 0;
	param->luma.scale_diff_ratio = 4;
	param->luma.diff_limit_ratio = 1;
	param->luma.scale_motion_ratio = 4;
	param->luma.blend_limit_ratio = 9;
	param->chroma.max_blend_ratio = 0;
	param->chroma.scale_diff_ratio = 4;
	param->chroma.diff_limit_ratio = 1;
	param->chroma.scale_motion_ratio = 4;
	param->chroma.blend_limit_ratio = 9;
}

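/*
 * Run a throwaway VP operation on a small scratch ION buffer and wait up
 * to 50 ms for its interrupt; vp_dummy_event is set so vp_handler only
 * completes the handshake instead of scheduling real work.
 */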
int vp_dummy_event(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev = c_data->dev;
	unsigned int width, height;
	struct ion_handle *handle = NULL;
	unsigned long paddr;
	size_t len;
	uint32_t reg;
	int rc = 0;

	dprintk(2, "%s: Start VP dummy event\n", __func__);
	handle = ion_alloc(dev->ion_client, 0x1200, SZ_4K,
			ION_HEAP(ION_CP_MM_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: ion_alloc failed\n", __func__);
		return -ENOMEM;
	}

	rc = ion_phys(dev->ion_client, handle, &paddr, &len);
	if (rc < 0) {
		pr_err("%s: ion_phys failed\n", __func__);
		ion_free(dev->ion_client, handle);
		return rc;
	}

	width = c_data->vp_out_fmt.width;
	height = c_data->vp_out_fmt.height;

	c_data->vp_out_fmt.width = 0x3F;
	c_data->vp_out_fmt.height = 0x16;

	config_vp_format(c_data);
	writel_relaxed(paddr, VCAP_VP_T1_Y_BASE_ADDR);
	writel_relaxed(paddr + 0x2C0, VCAP_VP_T1_C_BASE_ADDR);
	writel_relaxed(paddr + 0x440, VCAP_VP_T2_Y_BASE_ADDR);
	writel_relaxed(paddr + 0x700, VCAP_VP_T2_C_BASE_ADDR);
	writel_relaxed(paddr + 0x880, VCAP_VP_OUT_Y_BASE_ADDR);
	writel_relaxed(paddr + 0xB40, VCAP_VP_OUT_C_BASE_ADDR);
	writel_iowmb(paddr + 0x1100, VCAP_VP_MOTION_EST_ADDR);
	writel_relaxed(4 << 20 | 0x2 << 4, VCAP_VP_IN_CONFIG);
	writel_relaxed(4 << 20 | 0x1 << 4, VCAP_VP_OUT_CONFIG);

	dev->vp_dummy_event = true;

	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
	writel_iowmb(0x00000000, VCAP_VP_CTRL);
	writel_iowmb(0x00010000, VCAP_VP_CTRL);

	enable_irq(dev->vpirq->start);
	rc = wait_event_interruptible_timeout(dev->vp_dummy_waitq,
		dev->vp_dummy_complete, msecs_to_jiffies(50));
	if (!rc && !dev->vp_dummy_complete) {
		pr_err("%s: VP dummy event timeout\n", __func__);
		rc = -ETIME;
		writel_iowmb(0x00000000, VCAP_VP_CTRL);

		writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
		writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
		dev->vp_dummy_complete = false;
	}

	writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
	disable_irq(dev->vpirq->start);
	dev->vp_dummy_event = false;

	reg = readl_relaxed(VCAP_OFFSET(0x0D94));
	writel_relaxed(reg, VCAP_OFFSET(0x0D9C));

	c_data->vp_out_fmt.width = width;
	c_data->vp_out_fmt.height = height;
	ion_free(dev->ion_client, handle);

	dprintk(2, "%s: Exit VP dummy event\n", __func__);
	return rc;
}

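/*
 * Start VP processing for a streaming client: claim two input buffers
 * (T1, T2) and one output buffer, program the buffer addresses and the
 * input/output formats, then enable the VP interrupt and the hardware.
 */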
int kickoff_vp(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev;
	struct vp_action *vp_act;
	unsigned long flags = 0;
	unsigned int chroma_fmt = 0;
	int size;
#ifndef TOP_FIELD_FIX
	bool top_field;
#endif

	if (!c_data->streaming)
		return -ENOEXEC;

	dev = c_data->dev;
	dprintk(2, "Start Kickoff\n");

	if (dev->vp_client == NULL) {
		pr_err("No active vp client\n");
		return -ENODEV;
	}
	vp_act = &dev->vp_client->vid_vp_action;

	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
	if (list_empty(&vp_act->in_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		pr_err("%s: VP We have no more input buffers\n",
				__func__);
		return -EAGAIN;
	}

	vp_act->bufT1 = list_entry(vp_act->in_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufT1->list);

	if (list_empty(&vp_act->in_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		list_add(&vp_act->bufT1->list, &vp_act->in_active);
		pr_err("%s: VP We have no more input buffers\n",
				__func__);
		return -EAGAIN;
	}

	vp_act->bufT2 = list_entry(vp_act->in_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufT2->list);

	if (list_empty(&vp_act->out_active)) {
		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
		list_add(&vp_act->bufT2->list, &vp_act->in_active);
		list_add(&vp_act->bufT1->list, &vp_act->in_active);
		pr_err("%s: VP We have no more output buffers\n",
				__func__);
		return -EAGAIN;
	}

	vp_act->bufOut = list_entry(vp_act->out_active.next,
			struct vcap_buffer, list);
	list_del(&vp_act->bufOut->list);
	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);

	size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
	writel_relaxed(vp_act->bufT1->paddr, VCAP_VP_T1_Y_BASE_ADDR);
	writel_relaxed(vp_act->bufT1->paddr + size, VCAP_VP_T1_C_BASE_ADDR);

	config_in_buffer(c_data, vp_act->bufT2);
	config_out_buffer(c_data, vp_act->bufOut);

	/* Config VP */
	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
		chroma_fmt = 1;
	writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
			chroma_fmt << 11 | 0x2 << 4, VCAP_VP_IN_CONFIG);

	chroma_fmt = 0;
	if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
		chroma_fmt = 1;

	writel_relaxed((c_data->vp_out_fmt.width / 16) << 20 |
			chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);

	/* Enable Interrupt */
#ifdef TOP_FIELD_FIX
	vp_act->top_field = 1;
#else
	top_field = (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP);
#endif
	vp_act->vp_state = VP_FRAME2;
	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
#ifdef TOP_FIELD_FIX
	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
	writel_iowmb(0x00010000 | vp_act->top_field << 0, VCAP_VP_CTRL);
#else
	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
	writel_iowmb(0x00010000 | top_field, VCAP_VP_CTRL);
#endif
	atomic_set(&c_data->dev->vp_enabled, 1);
	enable_irq(dev->vpirq->start);
	return 0;
}

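/*
 * Resume VP processing after it stalled waiting for buffers: set up the
 * next buffer pair and re-enable the VP interrupt and the hardware.
 */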
int continue_vp(struct vcap_client_data *c_data)
{
	struct vcap_dev *dev;
	struct vp_action *vp_act;
	int rc;
#ifndef TOP_FIELD_FIX
	bool top_field;
#endif

	dprintk(2, "Start Continue\n");
	dev = c_data->dev;

	if (dev->vp_client == NULL) {
		pr_err("No active vp client\n");
		return -ENODEV;
	}
	vp_act = &dev->vp_client->vid_vp_action;

	if (vp_act->vp_state == VP_UNKNOWN) {
		pr_err("%s: VP is in an unknown state\n",
				__func__);
		return -EAGAIN;
	}

	rc = vp_setup_buffers(c_data);
	if (rc < 0)
		return rc;

#ifndef TOP_FIELD_FIX
	top_field = (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP);
#endif

	/* Config VP & Enable Interrupt */
	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
#ifdef TOP_FIELD_FIX
	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
	writel_iowmb(0x00010000 | vp_act->top_field << 0, VCAP_VP_CTRL);
#else
	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
	writel_iowmb(0x00010000 | top_field, VCAP_VP_CTRL);
#endif

	atomic_set(&c_data->dev->vp_enabled, 1);
	enable_irq(dev->vpirq->start);
	return 0;
}