1/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <media/msm_vidc.h>
17#include "msm_vidc_internal.h"
18#include "msm_vidc_debug.h"
19#include "msm_vdec.h"
20#include "msm_venc.h"
21#include "msm_vidc_common.h"
22#include <linux/delay.h>
23#include "vidc_hfi_api.h"
24#include "msm_vidc_dcvs.h"
25
26#define MAX_EVENTS 30
27
28static int get_poll_flags(void *instance)
29{
30 struct msm_vidc_inst *inst = instance;
31 struct vb2_queue *outq = &inst->bufq[OUTPUT_PORT].vb2_bufq;
32 struct vb2_queue *capq = &inst->bufq[CAPTURE_PORT].vb2_bufq;
33 struct vb2_buffer *out_vb = NULL;
34 struct vb2_buffer *cap_vb = NULL;
35 unsigned long flags;
36 int rc = 0;
37
38 if (v4l2_event_pending(&inst->event_handler))
39 rc |= POLLPRI;
40
41 spin_lock_irqsave(&capq->done_lock, flags);
42 if (!list_empty(&capq->done_list))
43 cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer,
44 done_entry);
45 if (cap_vb && (cap_vb->state == VB2_BUF_STATE_DONE
46 || cap_vb->state == VB2_BUF_STATE_ERROR))
47 rc |= POLLIN | POLLRDNORM;
48 spin_unlock_irqrestore(&capq->done_lock, flags);
49
50 spin_lock_irqsave(&outq->done_lock, flags);
51 if (!list_empty(&outq->done_list))
52 out_vb = list_first_entry(&outq->done_list, struct vb2_buffer,
53 done_entry);
54 if (out_vb && (out_vb->state == VB2_BUF_STATE_DONE
55 || out_vb->state == VB2_BUF_STATE_ERROR))
56 rc |= POLLOUT | POLLWRNORM;
57 spin_unlock_irqrestore(&outq->done_lock, flags);
58
59 return rc;
60}
61
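/*
 * msm_vidc_poll() - v4l2 poll hook for a vidc instance.
 *
 * Registers the caller on the event wait queue and on both vb2 done
 * queues, then reports readiness via get_poll_flags(): POLLPRI for a
 * pending v4l2 event, POLLIN/POLLRDNORM for a completed CAPTURE buffer
 * and POLLOUT/POLLWRNORM for a completed OUTPUT buffer.
 *
 * Illustrative userspace sketch (an assumption, not part of this driver):
 *
 *	struct pollfd pfd = { .fd = video_fd, .events = POLLIN | POLLPRI };
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		ioctl(video_fd, VIDIOC_DQBUF, &buf);
 */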
62int msm_vidc_poll(void *instance, struct file *filp,
63 struct poll_table_struct *wait)
64{
65 struct msm_vidc_inst *inst = instance;
66 struct vb2_queue *outq = NULL;
67 struct vb2_queue *capq = NULL;
68
69 if (!inst)
70 return -EINVAL;
71
72 outq = &inst->bufq[OUTPUT_PORT].vb2_bufq;
73 capq = &inst->bufq[CAPTURE_PORT].vb2_bufq;
74
75 poll_wait(filp, &inst->event_handler.wait, wait);
76 poll_wait(filp, &capq->done_wq, wait);
77 poll_wait(filp, &outq->done_wq, wait);
78 return get_poll_flags(inst);
79}
80EXPORT_SYMBOL(msm_vidc_poll);
81
82int msm_vidc_querycap(void *instance, struct v4l2_capability *cap)
83{
84 int rc = -EINVAL;
85 struct msm_vidc_inst *inst = instance;
86
87 if (!inst || !cap)
88 return -EINVAL;
89
90 if (inst->session_type == MSM_VIDC_DECODER)
91 rc = msm_vdec_querycap(instance, cap);
92 else if (inst->session_type == MSM_VIDC_ENCODER)
93 rc = msm_venc_querycap(instance, cap);
94 else
95 goto exit;
96 if (!rc) {
97 cap->device_caps = cap->capabilities;
98 cap->capabilities |= V4L2_CAP_DEVICE_CAPS;
99 }
100exit:
101 return rc;
102}
103EXPORT_SYMBOL(msm_vidc_querycap);
104
105int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
106{
107 struct msm_vidc_inst *inst = instance;
108
109 if (!inst || !f)
110 return -EINVAL;
111
112 if (inst->session_type == MSM_VIDC_DECODER)
113 return msm_vdec_enum_fmt(instance, f);
114 else if (inst->session_type == MSM_VIDC_ENCODER)
115 return msm_venc_enum_fmt(instance, f);
116 return -EINVAL;
117}
118EXPORT_SYMBOL(msm_vidc_enum_fmt);
119
120int msm_vidc_s_fmt(void *instance, struct v4l2_format *f)
121{
122 struct msm_vidc_inst *inst = instance;
123
124 if (!inst || !f)
125 return -EINVAL;
126
127 if (inst->session_type == MSM_VIDC_DECODER)
128 return msm_vdec_s_fmt(instance, f);
129 if (inst->session_type == MSM_VIDC_ENCODER)
130 return msm_venc_s_fmt(instance, f);
131 return -EINVAL;
132}
133EXPORT_SYMBOL(msm_vidc_s_fmt);
134
135int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
136{
137 struct msm_vidc_inst *inst = instance;
138
139 if (!inst || !f)
140 return -EINVAL;
141
142 if (inst->session_type == MSM_VIDC_DECODER)
143 return msm_vdec_g_fmt(instance, f);
144 else if (inst->session_type == MSM_VIDC_ENCODER)
145 return msm_venc_g_fmt(instance, f);
146 return -EINVAL;
147}
148EXPORT_SYMBOL(msm_vidc_g_fmt);
149
150int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
151{
152 struct msm_vidc_inst *inst = instance;
153
154 if (!inst || !control)
155 return -EINVAL;
156
157 return msm_comm_s_ctrl(instance, control);
158}
159EXPORT_SYMBOL(msm_vidc_s_ctrl);
160
161int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
162{
163 struct msm_vidc_inst *inst = instance;
164
165 if (!inst || !control)
166 return -EINVAL;
167
168 return msm_comm_g_ctrl(instance, control);
169}
170EXPORT_SYMBOL(msm_vidc_g_ctrl);
171
172int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
173{
174 struct msm_vidc_inst *inst = instance;
175
176 if (!inst || !control)
177 return -EINVAL;
178
179 if (inst->session_type == MSM_VIDC_DECODER)
180 return msm_vdec_s_ext_ctrl(instance, control);
181 if (inst->session_type == MSM_VIDC_ENCODER)
182 return msm_venc_s_ext_ctrl(instance, control);
183 return -EINVAL;
184}
185EXPORT_SYMBOL(msm_vidc_s_ext_ctrl);
186
187int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b)
188{
189 struct msm_vidc_inst *inst = instance;
190
191 if (!inst || !b)
192 return -EINVAL;
193
194 if (inst->session_type == MSM_VIDC_DECODER)
195 return msm_vdec_reqbufs(instance, b);
196 if (inst->session_type == MSM_VIDC_ENCODER)
197 return msm_venc_reqbufs(instance, b);
198 return -EINVAL;
199}
200EXPORT_SYMBOL(msm_vidc_reqbufs);
201
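/*
 * get_registered_buf() - look up an already registered buffer plane.
 *
 * Matches the incoming v4l2 plane against the registered list either by
 * ion handle (same fd) or by device address, and only if the offsets
 * contain or overlap each other. The caller must hold
 * registeredbufs.lock; the WARN below checks that. Returns the matching
 * buffer_info and sets *plane to the matching plane index, or NULL.
 */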
202struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
203 struct v4l2_buffer *b, int idx, int *plane)
204{
205 struct buffer_info *temp;
206 struct buffer_info *ret = NULL;
207 int i;
208 int fd = b->m.planes[idx].reserved[0];
209 u32 buff_off = b->m.planes[idx].reserved[1];
210 u32 size = b->m.planes[idx].length;
211 ion_phys_addr_t device_addr = b->m.planes[idx].m.userptr;
212
213 if (fd < 0 || !plane) {
214 dprintk(VIDC_ERR, "Invalid input\n");
215 goto err_invalid_input;
216 }
217
218 WARN(!mutex_is_locked(&inst->registeredbufs.lock),
219 "Registered buf lock is not acquired for %s", __func__);
220
221 *plane = 0;
222 list_for_each_entry(temp, &inst->registeredbufs.list, list) {
223 for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
224 bool ion_hndl_matches = temp->handle[i] ?
225 msm_smem_compare_buffers(inst->mem_client, fd,
226 temp->handle[i]->smem_priv) : false;
227 bool device_addr_matches = device_addr ==
228 temp->device_addr[i];
229 bool contains_within = CONTAINS(temp->buff_off[i],
230 temp->size[i], buff_off) ||
231 CONTAINS(buff_off, size, temp->buff_off[i]);
232 bool overlaps = OVERLAPS(buff_off, size,
233 temp->buff_off[i], temp->size[i]);
234
235 if (!temp->inactive &&
236 (ion_hndl_matches || device_addr_matches) &&
237 (contains_within || overlaps)) {
238 dprintk(VIDC_DBG,
239 "This memory region is already mapped\n");
240 ret = temp;
241 *plane = i;
242 break;
243 }
244 }
245 if (ret)
246 break;
247 }
248
249err_invalid_input:
250 return ret;
251}
252
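/*
 * get_same_fd_buffer() - reuse an existing mapping for a shared fd.
 *
 * If another registered plane was created from the same ion fd and is
 * still mapped, bump its same_fd_ref count and return its smem handle so
 * the new plane can reuse the existing device mapping instead of mapping
 * the fd again.
 */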
253static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
254{
255 struct buffer_info *temp;
256 struct msm_smem *same_fd_handle = NULL;
257
258 int i;
259
260 if (!fd)
261 return NULL;
262
263 if (!inst || fd < 0) {
264 dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
265 goto err_invalid_input;
266 }
267
268 mutex_lock(&inst->registeredbufs.lock);
269 list_for_each_entry(temp, &inst->registeredbufs.list, list) {
270 for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
271 bool ion_hndl_matches = temp->handle[i] ?
272 msm_smem_compare_buffers(inst->mem_client, fd,
273 temp->handle[i]->smem_priv) : false;
274 if (ion_hndl_matches && temp->mapped[i]) {
275 temp->same_fd_ref[i]++;
276 dprintk(VIDC_INFO,
277 "Found same fd buffer\n");
278 same_fd_handle = temp->handle[i];
279 break;
280 }
281 }
282 if (same_fd_handle)
283 break;
284 }
285 mutex_unlock(&inst->registeredbufs.lock);
286
287err_invalid_input:
288 return same_fd_handle;
289}
290
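/*
 * device_to_uvaddr() - reverse lookup from device address to buffer_info.
 *
 * Used on dequeue to translate the firmware-facing device address back
 * into the buffer_info that carries the userspace fd, offset and virtual
 * address. Only active (non-inactive) entries are considered.
 */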
291struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
292 ion_phys_addr_t device_addr)
293{
294 struct buffer_info *temp = NULL;
295 bool found = false;
296 int i;
297
298 if (!buf_list || !device_addr) {
299 dprintk(VIDC_ERR,
300 "Invalid input- device_addr: %pa buf_list: %pK\n",
301 &device_addr, buf_list);
302 goto err_invalid_input;
303 }
304
305 mutex_lock(&buf_list->lock);
306 list_for_each_entry(temp, &buf_list->list, list) {
307 for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
308 if (!temp->inactive &&
309 temp->device_addr[i] == device_addr) {
310 dprintk(VIDC_INFO,
311 "Found same fd buffer\n");
312 found = true;
313 break;
314 }
315 }
316
317 if (found)
318 break;
319 }
320 mutex_unlock(&buf_list->lock);
321
322err_invalid_input:
323 return temp;
324}
325
326static inline void populate_buf_info(struct buffer_info *binfo,
327 struct v4l2_buffer *b, u32 i)
328{
329 if (i >= VIDEO_MAX_PLANES) {
330 dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
331 return;
332 }
333 binfo->type = b->type;
334 binfo->fd[i] = b->m.planes[i].reserved[0];
335 binfo->buff_off[i] = b->m.planes[i].reserved[1];
336 binfo->size[i] = b->m.planes[i].length;
337 binfo->uvaddr[i] = b->m.planes[i].m.userptr;
338 binfo->num_planes = b->length;
339 binfo->memory = b->memory;
340 binfo->v4l2_index = b->index;
341 binfo->timestamp.tv_sec = b->timestamp.tv_sec;
342 binfo->timestamp.tv_usec = b->timestamp.tv_usec;
343 dprintk(VIDC_DBG, "%s: fd[%d] = %d b->index = %d",
344 __func__, i, binfo->fd[i], b->index);
345}
346
347static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b,
348 struct buffer_info *binfo)
349{
350 int i = 0;
351
352 b->type = binfo->type;
353 b->length = binfo->num_planes;
354 b->memory = binfo->memory;
355 b->index = binfo->v4l2_index;
356 b->timestamp.tv_sec = binfo->timestamp.tv_sec;
357 b->timestamp.tv_usec = binfo->timestamp.tv_usec;
358 binfo->dequeued = false;
359 for (i = 0; i < binfo->num_planes; ++i) {
360 b->m.planes[i].reserved[0] = binfo->fd[i];
361 b->m.planes[i].reserved[1] = binfo->buff_off[i];
362 b->m.planes[i].length = binfo->size[i];
363 b->m.planes[i].m.userptr = binfo->device_addr[i];
364 dprintk(VIDC_DBG, "%s %d %d %d %pa\n", __func__, binfo->fd[i],
365 binfo->buff_off[i], binfo->size[i],
366 &binfo->device_addr[i]);
367 }
368}
369
370static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
371 struct v4l2_plane *p, enum hal_buffer buffer_type)
372{
373 struct msm_smem *handle = NULL;
374
375 handle = msm_comm_smem_user_to_kernel(inst,
376 p->reserved[0],
377 p->reserved[1],
378 buffer_type);
379 if (!handle) {
380 dprintk(VIDC_ERR,
381 "%s: Failed to get device buffer address\n", __func__);
382 return NULL;
383 }
384 return handle;
385}
386
387static inline enum hal_buffer get_hal_buffer_type(
388 struct msm_vidc_inst *inst, struct v4l2_buffer *b)
389{
390 if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
391 return HAL_BUFFER_INPUT;
392 else if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
393 return HAL_BUFFER_OUTPUT;
394 else
395 return -EINVAL;
396}
397
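/*
 * In dynamic output buffer mode (HAL_BUFFER_MODE_DYNAMIC on the CAPTURE
 * port) the client may queue any buffer at any time: capture buffers are
 * not pre-registered with firmware and are instead reference counted per
 * QBUF/DQBUF, which is why several paths below special-case this mode.
 */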
398static inline bool is_dynamic_output_buffer_mode(struct v4l2_buffer *b,
399 struct msm_vidc_inst *inst)
400{
401 return b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
402 inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC;
403}
404
405
406static inline bool is_encoder_input_buffer(struct v4l2_buffer *b,
407 struct msm_vidc_inst *inst)
408{
409 return b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
410 inst->session_type == MSM_VIDC_ENCODER;
411}
412
413static inline void save_v4l2_buffer(struct v4l2_buffer *b,
414 struct buffer_info *binfo)
415{
416 int i = 0;
417
418 for (i = 0; i < b->length; ++i) {
419 if (EXTRADATA_IDX(b->length) &&
420 (i == EXTRADATA_IDX(b->length)) &&
421 !b->m.planes[i].length) {
422 continue;
423 }
424 populate_buf_info(binfo, b, i);
425 }
426}
427
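/*
 * map_and_register_buf() - map a v4l2 buffer into the device and track it.
 *
 * For each plane: skip an empty extradata plane, reuse an existing
 * registration when possible (bumping the reference count in dynamic
 * output mode), reuse a mapping that shares the same fd, or map the plane
 * through msm_comm_smem_user_to_kernel(). The resulting device address is
 * written back into plane->m.userptr so the vdec/venc qbuf path can hand
 * it to firmware. On success the buffer_info is added to registeredbufs.
 */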
428int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
429{
430 struct buffer_info *binfo = NULL;
431 struct buffer_info *temp = NULL, *iterator = NULL;
432 int plane = 0;
433 int i = 0, rc = 0;
434 struct msm_smem *same_fd_handle = NULL;
435
436 if (!b || !inst) {
437 dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
438 return -EINVAL;
439 }
440
441 binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
442 if (!binfo) {
443 dprintk(VIDC_ERR, "Out of memory\n");
444 rc = -ENOMEM;
445 goto exit;
446 }
447 if (b->length > VIDEO_MAX_PLANES) {
448 dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n",
449 b->length, VIDEO_MAX_PLANES);
450 rc = -EINVAL;
451 goto exit;
452 }
453
454 dprintk(VIDC_DBG, "[MAP] Create binfo = %pK fd = %d type = %d\n",
455 binfo, b->m.planes[0].reserved[0], b->type);
456
457 for (i = 0; i < b->length; ++i) {
458 rc = 0;
459 if (EXTRADATA_IDX(b->length) &&
460 (i == EXTRADATA_IDX(b->length)) &&
461 !b->m.planes[i].length) {
462 continue;
463 }
464 mutex_lock(&inst->registeredbufs.lock);
465 temp = get_registered_buf(inst, b, i, &plane);
466 if (temp && !is_dynamic_output_buffer_mode(b, inst)) {
467 dprintk(VIDC_DBG,
468 "This memory region has already been prepared\n");
469 rc = 0;
470 mutex_unlock(&inst->registeredbufs.lock);
471 goto exit;
472 }
473
474 if (temp && is_dynamic_output_buffer_mode(b, inst) && !i) {
475 /*
476 * Buffer is already present in registered list
477 * increment ref_count, populate new values of v4l2
478 * buffer in existing buffer_info struct.
479 *
480 * We will use the saved buffer info and queue it when
481 * we receive RELEASE_BUFFER_REFERENCE EVENT from f/w.
482 */
483 dprintk(VIDC_DBG, "[MAP] Buffer already prepared\n");
484 temp->inactive = false;
485 list_for_each_entry(iterator,
486 &inst->registeredbufs.list, list) {
487 if (iterator == temp) {
488 rc = buf_ref_get(inst, temp);
489 save_v4l2_buffer(b, temp);
490 break;
491 }
492 }
493 }
494 mutex_unlock(&inst->registeredbufs.lock);
495 /*
496 * rc == 1,
497 * buffer is mapped, fw has released all references, so skip
498 * mapping and queue it immediately.
499 *
500 * rc == 2,
501 * buffer is mapped and fw is holding a reference, hold it in
502 * the driver and queue it later when fw has released it.
503 */
504 if (rc == 1) {
505 rc = 0;
506 goto exit;
507 } else if (rc == 2) {
508 rc = -EEXIST;
509 goto exit;
510 }
511
512 same_fd_handle = get_same_fd_buffer(
513 inst, b->m.planes[i].reserved[0]);
514
515 populate_buf_info(binfo, b, i);
516 if (same_fd_handle) {
517 binfo->device_addr[i] =
518 same_fd_handle->device_addr + binfo->buff_off[i];
519 b->m.planes[i].m.userptr = binfo->device_addr[i];
520 binfo->mapped[i] = false;
521 binfo->handle[i] = same_fd_handle;
522 } else {
523 binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
524 get_hal_buffer_type(inst, b));
525 if (!binfo->handle[i]) {
526 rc = -EINVAL;
527 goto exit;
528 }
529
530 binfo->mapped[i] = true;
531 binfo->device_addr[i] = binfo->handle[i]->device_addr +
532 binfo->buff_off[i];
533 b->m.planes[i].m.userptr = binfo->device_addr[i];
534 }
535
536 /* We maintain one ref count for all planes */
537 if (!i && is_dynamic_output_buffer_mode(b, inst)) {
538 rc = buf_ref_get(inst, binfo);
539 if (rc < 0)
540 goto exit;
541 }
542 dprintk(VIDC_DBG,
543 "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
544 __func__, binfo, i, binfo->handle[i],
545 &binfo->device_addr[i], binfo->fd[i],
546 binfo->buff_off[i], binfo->mapped[i]);
547 }
548
549 mutex_lock(&inst->registeredbufs.lock);
550 list_add_tail(&binfo->list, &inst->registeredbufs.list);
551 mutex_unlock(&inst->registeredbufs.lock);
552 return 0;
553
554exit:
555 kfree(binfo);
556 return rc;
557}
558int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
559 struct buffer_info *binfo)
560{
561 int i = 0;
562 struct buffer_info *temp = NULL;
563 bool found = false, keep_node = false;
564
565 if (!inst || !binfo) {
566 dprintk(VIDC_ERR, "%s invalid param: %pK %pK\n",
567 __func__, inst, binfo);
568 return -EINVAL;
569 }
570
571 WARN(!mutex_is_locked(&inst->registeredbufs.lock),
572 "Registered buf lock is not acquired for %s", __func__);
573
574 /*
575 * Make sure the buffer to be unmapped and deleted
576 * from the registered list is present in the list.
577 */
578 list_for_each_entry(temp, &inst->registeredbufs.list, list) {
579 if (temp == binfo) {
580 found = true;
581 break;
582 }
583 }
584
585 /*
586 * Free the buffer info only if
587 * - buffer info has not been deleted from registered list
588 * - vidc client has called dqbuf on the buffer
589 * - no references are held on the buffer
590 */
591 if (!found || !temp || !temp->pending_deletion || !temp->dequeued)
592 goto exit;
593
594 for (i = 0; i < temp->num_planes; i++) {
595 dprintk(VIDC_DBG,
596 "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
597 __func__, temp, i, temp->handle[i],
598 &temp->device_addr[i], temp->fd[i],
599 temp->buff_off[i], temp->mapped[i]);
600 /*
601 * Unmap the handle only if the buffer has been mapped and no
602 * other buffer has a reference to this buffer.
603 * In case of buffers with same fd, we will map the buffer only
604 * once and subsequent buffers will refer to the mapped buffer's
605 * device address.
606 * For buffers which share the same fd, do not unmap and keep
607 * the buffer info in registered list.
608 */
609 if (temp->handle[i] && temp->mapped[i] &&
610 !temp->same_fd_ref[i]) {
611 msm_comm_smem_free(inst,
612 temp->handle[i]);
613 }
614
615 if (temp->same_fd_ref[i])
616 keep_node = true;
617 else {
618 temp->fd[i] = 0;
619 temp->handle[i] = 0;
620 temp->device_addr[i] = 0;
621 temp->uvaddr[i] = 0;
622 }
623 }
624 if (!keep_node) {
625 dprintk(VIDC_DBG, "[UNMAP] AND-FREED binfo: %pK\n", temp);
626 list_del(&temp->list);
627 kfree(temp);
628 } else {
629 temp->inactive = true;
630 dprintk(VIDC_DBG, "[UNMAP] NOT-FREED binfo: %pK\n", temp);
631 }
632exit:
633 return 0;
634}
635
636
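/*
 * qbuf_dynamic_buf() - requeue a saved buffer to firmware.
 *
 * Rebuilds a v4l2_buffer from the saved buffer_info (see
 * repopulate_v4l2_buffer()) and queues it. This is used in dynamic output
 * buffer mode once firmware drops the reference to a buffer that the
 * driver has been holding on to.
 */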
637int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
638 struct buffer_info *binfo)
639{
640 struct v4l2_buffer b = {0};
641 struct v4l2_plane plane[VIDEO_MAX_PLANES] = { {0} };
642
643 if (!binfo) {
644 dprintk(VIDC_ERR, "%s invalid param: %pK\n", __func__, binfo);
645 return -EINVAL;
646 }
647 dprintk(VIDC_DBG, "%s fd[0] = %d\n", __func__, binfo->fd[0]);
648
649 b.m.planes = plane;
650 repopulate_v4l2_buffer(&b, binfo);
651
652 if (inst->session_type == MSM_VIDC_DECODER)
653 return msm_vdec_qbuf(inst, &b);
654 if (inst->session_type == MSM_VIDC_ENCODER)
655 return msm_venc_qbuf(inst, &b);
656
657 return -EINVAL;
658}
659
660int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
661 struct buffer_info *binfo)
662{
663 int i = 0;
664 int rc = 0;
665
666 if (!inst) {
667 dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
668 return -EINVAL;
669 }
670
671 if (!binfo) {
672 dprintk(VIDC_ERR, "%s: invalid buffer info: %pK\n",
673 __func__, binfo);
674 return -EINVAL;
675 }
676
677 if (binfo->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
678 return 0;
679
680
681 for (i = 0; i < binfo->num_planes; i++) {
682 if (binfo->handle[i]) {
683 rc = msm_comm_smem_cache_operations(inst,
684 binfo->handle[i], SMEM_CACHE_INVALIDATE);
685 if (rc) {
686 dprintk(VIDC_ERR,
687 "%s: Failed to clean caches: %d\n",
688 __func__, rc);
689 return -EINVAL;
690 }
691 } else
692 dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n",
693 __func__, i);
694 }
695 return 0;
696}
697
698static bool valid_v4l2_buffer(struct v4l2_buffer *b,
699 struct msm_vidc_inst *inst) {
700 enum vidc_ports port =
701 !V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM :
702 b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? CAPTURE_PORT :
703 b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? OUTPUT_PORT :
704 MAX_PORT_NUM;
705
706 return port != MAX_PORT_NUM &&
707 inst->fmts[port].num_planes == b->length;
708}
709
710int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
711{
712 struct msm_vidc_inst *inst = instance;
713
714 if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
715 return -EINVAL;
716
717 if (inst->state == MSM_VIDC_CORE_INVALID ||
718 inst->core->state == VIDC_CORE_INVALID)
719 return -EINVAL;
720
721 if (is_dynamic_output_buffer_mode(b, inst))
722 return 0;
723
724 if (map_and_register_buf(inst, b))
725 return -EINVAL;
726
727 if (inst->session_type == MSM_VIDC_DECODER)
728 return msm_vdec_prepare_buf(instance, b);
729 if (inst->session_type == MSM_VIDC_ENCODER)
730 return msm_venc_prepare_buf(instance, b);
731 return -EINVAL;
732}
733EXPORT_SYMBOL(msm_vidc_prepare_buf);
734
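/*
 * msm_vidc_release_buffers() - release and unmap all buffers of a type.
 *
 * First asks firmware to release each registered buffer of the given type
 * (skipped for dynamic CAPTURE buffers, which were never registered with
 * firmware), then frees the smem mappings and buffer_info nodes.
 */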
735int msm_vidc_release_buffers(void *instance, int buffer_type)
736{
737 struct msm_vidc_inst *inst = instance;
738 struct buffer_info *bi, *dummy;
739 struct v4l2_buffer buffer_info;
740 struct v4l2_plane plane[VIDEO_MAX_PLANES];
741 int i, rc = 0;
742
743 if (!inst)
744 return -EINVAL;
745
746 if (!inst->in_reconfig &&
747 inst->state > MSM_VIDC_LOAD_RESOURCES &&
748 inst->state < MSM_VIDC_RELEASE_RESOURCES_DONE) {
749 rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
750 if (rc) {
751 dprintk(VIDC_ERR,
752 "Failed to move inst: %pK to release res done\n",
753 inst);
754 }
755 }
756
757 /*
758 * In dynamic buffer mode, driver needs to release resources,
759 * but not call release buffers on firmware, as the buffers
760 * were never registered with firmware.
761 */
762 if (buffer_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
763 inst->buffer_mode_set[CAPTURE_PORT] ==
764 HAL_BUFFER_MODE_DYNAMIC) {
765 goto free_and_unmap;
766 }
767
768 mutex_lock(&inst->registeredbufs.lock);
769 list_for_each_entry(bi, &inst->registeredbufs.list, list) {
770 bool release_buf = false;
771
772 if (bi->type == buffer_type) {
773 buffer_info.type = bi->type;
774 for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
775 i++) {
776 plane[i].reserved[0] = bi->fd[i];
777 plane[i].reserved[1] = bi->buff_off[i];
778 plane[i].length = bi->size[i];
779 plane[i].m.userptr = bi->device_addr[i];
780 buffer_info.m.planes = plane;
781 dprintk(VIDC_DBG,
782 "Releasing buffer: %d, %d, %d\n",
783 buffer_info.m.planes[i].reserved[0],
784 buffer_info.m.planes[i].reserved[1],
785 buffer_info.m.planes[i].length);
786 }
787 buffer_info.length = bi->num_planes;
788 release_buf = true;
789 }
790
791 if (!release_buf)
792 continue;
793 if (inst->session_type == MSM_VIDC_DECODER)
794 rc = msm_vdec_release_buf(instance,
795 &buffer_info);
796 if (inst->session_type == MSM_VIDC_ENCODER)
797 rc = msm_venc_release_buf(instance,
798 &buffer_info);
799 if (rc)
800 dprintk(VIDC_ERR,
801 "Failed Release buffer: %d, %d, %d\n",
802 buffer_info.m.planes[0].reserved[0],
803 buffer_info.m.planes[0].reserved[1],
804 buffer_info.m.planes[0].length);
805 }
806 mutex_unlock(&inst->registeredbufs.lock);
807
808free_and_unmap:
809 mutex_lock(&inst->registeredbufs.lock);
810 list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
811 if (bi->type == buffer_type) {
812 list_del(&bi->list);
813 for (i = 0; i < bi->num_planes; i++) {
814 if (bi->handle[i] && bi->mapped[i]) {
815 dprintk(VIDC_DBG,
816 "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
817 __func__, bi, i, bi->handle[i],
818 &bi->device_addr[i], bi->fd[i],
819 bi->buff_off[i], bi->mapped[i]);
820 msm_comm_smem_free(inst,
821 bi->handle[i]);
822 }
823 }
824 kfree(bi);
825 }
826 }
827 mutex_unlock(&inst->registeredbufs.lock);
828 return rc;
829}
830EXPORT_SYMBOL(msm_vidc_release_buffers);
831
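/*
 * msm_vidc_qbuf() - queue a buffer to the encoder or decoder.
 *
 * Maps/registers the buffer, rewrites each plane's userptr with the
 * device address, performs the required cache maintenance (invalidate for
 * hybrid HEVC capture buffers, clean for OUTPUT buffers) and then hands
 * the buffer to the vdec/venc qbuf implementation.
 */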
832int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
833{
834 struct msm_vidc_inst *inst = instance;
835 struct buffer_info *binfo;
836 int plane = 0;
837 int rc = 0;
838 int i;
839
840 if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
841 return -EINVAL;
842
843 if (inst->state == MSM_VIDC_CORE_INVALID ||
844 inst->core->state == VIDC_CORE_INVALID)
845 return -EINVAL;
846
847 rc = map_and_register_buf(inst, b);
848 if (rc == -EEXIST) {
849 if (atomic_read(&inst->in_flush) &&
850 is_dynamic_output_buffer_mode(b, inst)) {
851 dprintk(VIDC_ERR,
852 "Flush in progress, do not hold any buffers in driver\n");
853 msm_comm_flush_dynamic_buffers(inst);
854 }
855 return 0;
856 }
857 if (rc)
858 return rc;
859
860 for (i = 0; i < b->length; ++i) {
861 if (EXTRADATA_IDX(b->length) &&
862 (i == EXTRADATA_IDX(b->length)) &&
863 !b->m.planes[i].length) {
864 b->m.planes[i].m.userptr = 0;
865 continue;
866 }
867 mutex_lock(&inst->registeredbufs.lock);
868 binfo = get_registered_buf(inst, b, i, &plane);
869 mutex_unlock(&inst->registeredbufs.lock);
870 if (!binfo) {
871 dprintk(VIDC_ERR,
872 "This buffer is not registered: %d, %d, %d\n",
873 b->m.planes[i].reserved[0],
874 b->m.planes[i].reserved[1],
875 b->m.planes[i].length);
876 goto err_invalid_buff;
877 }
878 b->m.planes[i].m.userptr = binfo->device_addr[i];
879 dprintk(VIDC_DBG, "Queueing device address = %pa\n",
880 &binfo->device_addr[i]);
881
882 if (inst->fmts[OUTPUT_PORT].fourcc ==
883 V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] &&
884 b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
885 rc = msm_comm_smem_cache_operations(inst,
886 binfo->handle[i], SMEM_CACHE_INVALIDATE);
887 if (rc) {
888 dprintk(VIDC_ERR,
889 "Failed to inv caches: %d\n", rc);
890 goto err_invalid_buff;
891 }
892 }
893
894 if (binfo->handle[i] &&
895 (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
896 rc = msm_comm_smem_cache_operations(inst,
897 binfo->handle[i], SMEM_CACHE_CLEAN);
898 if (rc) {
899 dprintk(VIDC_ERR,
900 "Failed to clean caches: %d\n", rc);
901 goto err_invalid_buff;
902 }
903 }
904 }
905
906 if (inst->session_type == MSM_VIDC_DECODER)
907 return msm_vdec_qbuf(instance, b);
908 if (inst->session_type == MSM_VIDC_ENCODER)
909 return msm_venc_qbuf(instance, b);
910
911err_invalid_buff:
912 return -EINVAL;
913}
914EXPORT_SYMBOL(msm_vidc_qbuf);
915
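/*
 * msm_vidc_dqbuf() - dequeue a buffer and translate it back for userspace.
 *
 * After the vdec/venc dqbuf, each plane's device address is mapped back
 * to the original user virtual address, fd and offset via
 * device_to_uvaddr(). Capture buffer caches are invalidated, and in
 * dynamic output mode the buffer is unmapped/deregistered once no
 * references remain.
 */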
916int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
917{
918 struct msm_vidc_inst *inst = instance;
919 struct buffer_info *buffer_info = NULL;
920 int i = 0, rc = 0;
921
922 if (!inst || !b || !valid_v4l2_buffer(b, inst))
923 return -EINVAL;
924
925 if (inst->session_type == MSM_VIDC_DECODER)
926 rc = msm_vdec_dqbuf(instance, b);
927 if (inst->session_type == MSM_VIDC_ENCODER)
928 rc = msm_venc_dqbuf(instance, b);
929
930 if (rc)
931 return rc;
932
933 for (i = b->length - 1; i >= 0 ; i--) {
934 if (EXTRADATA_IDX(b->length) &&
935 i == EXTRADATA_IDX(b->length)) {
936 continue;
937 }
938 buffer_info = device_to_uvaddr(&inst->registeredbufs,
939 b->m.planes[i].m.userptr);
940
941 if (!buffer_info) {
942 dprintk(VIDC_ERR,
943 "%s no buffer info registered for buffer addr: %#lx\n",
944 __func__, b->m.planes[i].m.userptr);
945 return -EINVAL;
946 }
947
948 b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
949 b->m.planes[i].reserved[0] = buffer_info->fd[i];
950 b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
951 if (!(inst->flags & VIDC_SECURE) && !b->m.planes[i].m.userptr) {
952 dprintk(VIDC_ERR,
953 "%s: Failed to find user virtual address, %#lx, %d, %d\n",
954 __func__, b->m.planes[i].m.userptr, b->type, i);
955 return -EINVAL;
956 }
957 }
958
959 if (!buffer_info) {
960 dprintk(VIDC_ERR,
961 "%s: error - no buffer info found in registered list\n",
962 __func__);
963 return -EINVAL;
964 }
965
966 rc = output_buffer_cache_invalidate(inst, buffer_info);
967 if (rc)
968 return rc;
969
970 if (is_dynamic_output_buffer_mode(b, inst)) {
971 buffer_info->dequeued = true;
972
973 dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
974 buffer_info->fd[0]);
975 mutex_lock(&inst->registeredbufs.lock);
976 rc = unmap_and_deregister_buf(inst, buffer_info);
977 mutex_unlock(&inst->registeredbufs.lock);
978 }
979
980 return rc;
981}
982EXPORT_SYMBOL(msm_vidc_dqbuf);
983
984int msm_vidc_streamon(void *instance, enum v4l2_buf_type i)
985{
986 struct msm_vidc_inst *inst = instance;
987
988 if (!inst)
989 return -EINVAL;
990
991 if (inst->session_type == MSM_VIDC_DECODER)
992 return msm_vdec_streamon(instance, i);
993 if (inst->session_type == MSM_VIDC_ENCODER)
994 return msm_venc_streamon(instance, i);
995 return -EINVAL;
996}
997EXPORT_SYMBOL(msm_vidc_streamon);
998
999int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i)
1000{
1001 struct msm_vidc_inst *inst = instance;
1002
1003 if (!inst)
1004 return -EINVAL;
1005
1006 if (inst->session_type == MSM_VIDC_DECODER)
1007 return msm_vdec_streamoff(instance, i);
1008 if (inst->session_type == MSM_VIDC_ENCODER)
1009 return msm_venc_streamoff(instance, i);
1010 return -EINVAL;
1011}
1012EXPORT_SYMBOL(msm_vidc_streamoff);
1013
1014int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
1015{
1016 struct msm_vidc_inst *inst = instance;
1017 struct msm_vidc_capability *capability = NULL;
1018
1019 if (!inst || !fsize) {
1020 dprintk(VIDC_ERR, "%s: invalid parameter: %pK %pK\n",
1021 __func__, inst, fsize);
1022 return -EINVAL;
1023 }
1024 if (!inst->core)
1025 return -EINVAL;
1026
1027 capability = &inst->capability;
1028 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1029 fsize->stepwise.min_width = capability->width.min;
1030 fsize->stepwise.max_width = capability->width.max;
1031 fsize->stepwise.step_width = capability->width.step_size;
1032 fsize->stepwise.min_height = capability->height.min;
1033 fsize->stepwise.max_height = capability->height.max;
1034 fsize->stepwise.step_height = capability->height.step_size;
1035 return 0;
1036}
1037EXPORT_SYMBOL(msm_vidc_enum_framesizes);
1038
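/*
 * Stub vb2 memory ops: buffer mapping is handled by the driver itself via
 * msm_smem (see map_and_register_buf()), so vb2 only needs non-NULL
 * get_userptr/put_userptr hooks. The 0xdeadbeef return value is an opaque
 * placeholder cookie; it is never used as a real mapping.
 */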
1039static void *vidc_get_userptr(struct device *dev, unsigned long vaddr,
1040 unsigned long size, enum dma_data_direction dma_dir)
1041{
1042 return (void *)0xdeadbeef;
1043}
1044
1045static void vidc_put_userptr(void *buf_priv)
1046{
1047
1048}
1049static const struct vb2_mem_ops msm_vidc_vb2_mem_ops = {
1050 .get_userptr = vidc_get_userptr,
1051 .put_userptr = vidc_put_userptr,
1052};
1053
1054static inline int vb2_bufq_init(struct msm_vidc_inst *inst,
1055 enum v4l2_buf_type type, enum session_type sess)
1056{
1057 struct vb2_queue *q = NULL;
1058
1059 if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1060 q = &inst->bufq[CAPTURE_PORT].vb2_bufq;
1061 } else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1062 q = &inst->bufq[OUTPUT_PORT].vb2_bufq;
1063 } else {
1064 dprintk(VIDC_ERR, "buf_type = %d not recognised\n", type);
1065 return -EINVAL;
1066 }
1067
1068 q->type = type;
1069 q->io_modes = VB2_MMAP | VB2_USERPTR;
1070 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1071
1072 if (sess == MSM_VIDC_DECODER)
1073 q->ops = msm_vdec_get_vb2q_ops();
1074 else if (sess == MSM_VIDC_ENCODER)
1075 q->ops = msm_venc_get_vb2q_ops();
1076 q->mem_ops = &msm_vidc_vb2_mem_ops;
1077 q->drv_priv = inst;
1078 q->allow_zero_bytesused = 1;
1079 return vb2_queue_init(q);
1080}
1081
1082static int setup_event_queue(void *inst,
1083 struct video_device *pvdev)
1084{
1085 int rc = 0;
1086 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1087
1088 v4l2_fh_init(&vidc_inst->event_handler, pvdev);
1089 v4l2_fh_add(&vidc_inst->event_handler);
1090
1091 return rc;
1092}
1093
1094int msm_vidc_subscribe_event(void *inst,
1095 const struct v4l2_event_subscription *sub)
1096{
1097 int rc = 0;
1098 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1099
1100 if (!inst || !sub)
1101 return -EINVAL;
1102
1103 rc = v4l2_event_subscribe(&vidc_inst->event_handler,
1104 sub, MAX_EVENTS, NULL);
1105 return rc;
1106}
1107EXPORT_SYMBOL(msm_vidc_subscribe_event);
1108
1109int msm_vidc_unsubscribe_event(void *inst,
1110 const struct v4l2_event_subscription *sub)
1111{
1112 int rc = 0;
1113 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1114
1115 if (!inst || !sub)
1116 return -EINVAL;
1117
1118 rc = v4l2_event_unsubscribe(&vidc_inst->event_handler, sub);
1119 return rc;
1120}
1121EXPORT_SYMBOL(msm_vidc_unsubscribe_event);
1122
1123int msm_vidc_dqevent(void *inst, struct v4l2_event *event)
1124{
1125 int rc = 0;
1126 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1127
1128 if (!inst || !event)
1129 return -EINVAL;
1130
1131 rc = v4l2_event_dequeue(&vidc_inst->event_handler, event, false);
1132 return rc;
1133}
1134EXPORT_SYMBOL(msm_vidc_dqevent);
1135
1136static bool msm_vidc_check_for_inst_overload(struct msm_vidc_core *core)
1137{
1138 u32 instance_count = 0;
1139 u32 secure_instance_count = 0;
1140 struct msm_vidc_inst *inst = NULL;
1141 bool overload = false;
1142
1143 mutex_lock(&core->lock);
1144 list_for_each_entry(inst, &core->instances, list) {
1145 instance_count++;
1146 /* This flag is not updated yet for the current instance */
1147 if (inst->flags & VIDC_SECURE)
1148 secure_instance_count++;
1149 }
1150 mutex_unlock(&core->lock);
1151
1152 /* Instance count includes current instance as well. */
1153
1154 if ((instance_count > core->resources.max_inst_count) ||
1155 (secure_instance_count > core->resources.max_secure_inst_count))
1156 overload = true;
1157 return overload;
1158}
1159
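/*
 * msm_vidc_open() - create and initialize a vidc session.
 *
 * Allocates the instance, initializes its locks, buffer lists and
 * completions, creates the ion memory client, sets up decoder- or
 * encoder-specific controls, initializes both vb2 queues and the event
 * queue, adds the instance to the core and moves it to CORE_INIT_DONE.
 * The open is rejected if the (secure) instance count would exceed the
 * limits from platform resources.
 */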
1160void *msm_vidc_open(int core_id, int session_type)
1161{
1162 struct msm_vidc_inst *inst = NULL;
1163 struct msm_vidc_core *core = NULL;
1164 int rc = 0;
1165 int i = 0;
1166
1167 if (core_id >= MSM_VIDC_CORES_MAX ||
1168 session_type >= MSM_VIDC_MAX_DEVICES) {
1169 dprintk(VIDC_ERR, "Invalid input, core_id = %d, session = %d\n",
1170 core_id, session_type);
1171 goto err_invalid_core;
1172 }
1173 core = get_vidc_core(core_id);
1174 if (!core) {
1175 dprintk(VIDC_ERR,
1176 "Failed to find core for core_id = %d\n", core_id);
1177 goto err_invalid_core;
1178 }
1179
1180 inst = kzalloc(sizeof(*inst), GFP_KERNEL);
1181 if (!inst) {
1182 dprintk(VIDC_ERR, "Failed to allocate memory\n");
1183 rc = -ENOMEM;
1184 goto err_invalid_core;
1185 }
1186
1187 pr_info(VIDC_DBG_TAG "Opening video instance: %pK, %d\n",
1188 VIDC_MSG_PRIO2STRING(VIDC_INFO), inst, session_type);
1189 mutex_init(&inst->sync_lock);
1190 mutex_init(&inst->bufq[CAPTURE_PORT].lock);
1191 mutex_init(&inst->bufq[OUTPUT_PORT].lock);
1192 mutex_init(&inst->lock);
1193
1194 INIT_MSM_VIDC_LIST(&inst->pendingq);
1195 INIT_MSM_VIDC_LIST(&inst->scratchbufs);
1196 INIT_MSM_VIDC_LIST(&inst->persistbufs);
1197 INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
1198 INIT_MSM_VIDC_LIST(&inst->outputbufs);
1199 INIT_MSM_VIDC_LIST(&inst->registeredbufs);
1200
1201 kref_init(&inst->kref);
1202
1203 inst->session_type = session_type;
1204 inst->state = MSM_VIDC_CORE_UNINIT_DONE;
1205 inst->core = core;
1206 inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
1207 inst->instant_bitrate = 0;
1208 inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
1209 inst->colour_space = MSM_VIDC_BT601_6_525;
1210
1211 for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
1212 i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
1213 init_completion(&inst->completions[i]);
1214 }
1215 inst->mem_client = msm_smem_new_client(SMEM_ION,
1216 &inst->core->resources, session_type);
1217 if (!inst->mem_client) {
1218 dprintk(VIDC_ERR, "Failed to create memory client\n");
1219 goto fail_mem_client;
1220 }
1221 if (session_type == MSM_VIDC_DECODER) {
1222 msm_vdec_inst_init(inst);
1223 rc = msm_vdec_ctrl_init(inst);
1224 } else if (session_type == MSM_VIDC_ENCODER) {
1225 msm_venc_inst_init(inst);
1226 rc = msm_venc_ctrl_init(inst);
1227 }
1228
1229 if (rc)
1230 goto fail_bufq_capture;
1231
1232 msm_dcvs_init(inst);
1233 rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
1234 session_type);
1235 if (rc) {
1236 dprintk(VIDC_ERR,
1237 "Failed to initialize vb2 queue on capture port\n");
1238 goto fail_bufq_capture;
1239 }
1240 rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
1241 session_type);
1242 if (rc) {
1243 dprintk(VIDC_ERR,
1244 "Failed to initialize vb2 queue on capture port\n");
1245 goto fail_bufq_output;
1246 }
1247
1248 setup_event_queue(inst, &core->vdev[session_type].vdev);
1249
1250 mutex_lock(&core->lock);
1251 list_add_tail(&inst->list, &core->instances);
1252 mutex_unlock(&core->lock);
1253
1254 rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
1255 if (rc) {
1256 dprintk(VIDC_ERR,
1257 "Failed to move video instance to init state\n");
1258 goto fail_init;
1259 }
1260
1261 if (msm_vidc_check_for_inst_overload(core)) {
1262 dprintk(VIDC_ERR,
1263 "Instance count reached Max limit, rejecting session");
1264 goto fail_init;
1265 }
1266
1267 inst->debugfs_root =
1268 msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
1269
1270 return inst;
1271fail_init:
1272 v4l2_fh_del(&inst->event_handler);
1273 v4l2_fh_exit(&inst->event_handler);
1274 vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
1275
1276 mutex_lock(&core->lock);
1277 list_del(&inst->list);
1278 mutex_unlock(&core->lock);
1279
1280fail_bufq_output:
1281 vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
1282fail_bufq_capture:
1283 msm_comm_ctrl_deinit(inst);
1284 msm_smem_delete_client(inst->mem_client);
1285fail_mem_client:
1286 kfree(inst);
1287 inst = NULL;
1288err_invalid_core:
1289 return inst;
1290}
1291EXPORT_SYMBOL(msm_vidc_open);
1292
1293static void cleanup_instance(struct msm_vidc_inst *inst)
1294{
1295 struct vb2_buf_entry *entry, *dummy;
1296
1297 if (inst) {
1298
1299 mutex_lock(&inst->pendingq.lock);
1300 list_for_each_entry_safe(entry, dummy, &inst->pendingq.list,
1301 list) {
1302 list_del(&entry->list);
1303 kfree(entry);
1304 }
1305 mutex_unlock(&inst->pendingq.lock);
1306
1307 if (msm_comm_release_scratch_buffers(inst, false)) {
1308 dprintk(VIDC_ERR,
1309 "Failed to release scratch buffers\n");
1310 }
1311
1312 if (msm_comm_release_persist_buffers(inst)) {
1313 dprintk(VIDC_ERR,
1314 "Failed to release persist buffers\n");
1315 }
1316
1317 if (msm_comm_release_output_buffers(inst)) {
1318 dprintk(VIDC_ERR,
1319 "Failed to release output buffers\n");
1320 }
1321
1322 if (inst->extradata_handle)
1323 msm_comm_smem_free(inst, inst->extradata_handle);
1324
1325 mutex_lock(&inst->pending_getpropq.lock);
1326 if (!list_empty(&inst->pending_getpropq.list)) {
1327 dprintk(VIDC_ERR,
1328 "pending_getpropq not empty\n");
1329 WARN_ON(VIDC_DBG_WARN_ENABLE);
1330 }
1331 mutex_unlock(&inst->pending_getpropq.lock);
1332 }
1333}
1334
1335int msm_vidc_destroy(struct msm_vidc_inst *inst)
1336{
1337 struct msm_vidc_core *core;
1338 int i = 0;
1339
1340 if (!inst || !inst->core)
1341 return -EINVAL;
1342
1343 core = inst->core;
1344
1345 mutex_lock(&core->lock);
1346 /* inst->list lives in core->instances */
1347 list_del(&inst->list);
1348 mutex_unlock(&core->lock);
1349
1350 msm_comm_ctrl_deinit(inst);
1351
1352 v4l2_fh_del(&inst->event_handler);
1353 v4l2_fh_exit(&inst->event_handler);
1354
1355 for (i = 0; i < MAX_PORT_NUM; i++)
1356 vb2_queue_release(&inst->bufq[i].vb2_bufq);
1357
1358 mutex_destroy(&inst->sync_lock);
1359 mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
1360 mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
1361 mutex_destroy(&inst->lock);
1362
1363 msm_vidc_debugfs_deinit_inst(inst);
1364 pr_info(VIDC_DBG_TAG "Closed video instance: %pK\n",
1365 VIDC_MSG_PRIO2STRING(VIDC_INFO), inst);
1366 kfree(inst);
1367 return 0;
1368}
1369
1370int msm_vidc_close(void *instance)
1371{
1372 void close_helper(struct kref *kref)
1373 {
1374 struct msm_vidc_inst *inst = container_of(kref,
1375 struct msm_vidc_inst, kref);
1376
1377 msm_vidc_destroy(inst);
1378 }
1379
1380 struct msm_vidc_inst *inst = instance;
1381 struct buffer_info *bi, *dummy;
1382 int rc = 0;
1383
1384 if (!inst || !inst->core)
1385 return -EINVAL;
1386
1387
1388 mutex_lock(&inst->registeredbufs.lock);
1389 list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
1390 if (bi->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1391 int i = 0;
1392
1393 list_del(&bi->list);
1394
1395 for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
1396 i++) {
1397 if (bi->handle[i] && bi->mapped[i])
1398 msm_comm_smem_free(inst, bi->handle[i]);
1399 }
1400
1401 kfree(bi);
1402 }
1403 }
1404 mutex_unlock(&inst->registeredbufs.lock);
1405
1406 cleanup_instance(inst);
1407 if (inst->state != MSM_VIDC_CORE_INVALID &&
1408 inst->core->state != VIDC_CORE_INVALID)
1409 rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
1410 else
1411 rc = msm_comm_force_cleanup(inst);
1412 if (rc)
1413 dprintk(VIDC_ERR,
1414 "Failed to move video instance to uninit state\n");
1415
1416 msm_comm_session_clean(inst);
1417 msm_smem_delete_client(inst->mem_client);
1418
1419 kref_put(&inst->kref, close_helper);
1420 return 0;
1421}
1422EXPORT_SYMBOL(msm_vidc_close);
1423
1424int msm_vidc_suspend(int core_id)
1425{
1426 return msm_comm_suspend(core_id);
1427}
1428EXPORT_SYMBOL(msm_vidc_suspend);
1429