/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <media/msm_vidc.h>
17#include "msm_vidc_internal.h"
18#include "msm_vidc_debug.h"
19#include "msm_vdec.h"
20#include "msm_venc.h"
21#include "msm_vidc_common.h"
22#include <linux/delay.h>
23#include "vidc_hfi_api.h"
24#include "msm_vidc_dcvs.h"
25
26#define MAX_EVENTS 30
27
/*
 * Compute the poll(2) readiness flags for a vidc instance.
 *
 * POLLPRI          - a V4L2 event is pending
 * POLLIN|POLLRDNORM  - head capture-queue buffer is DONE or ERROR
 * POLLOUT|POLLWRNORM - head output-queue buffer is DONE or ERROR
 *
 * Each queue's done_list head is inspected under that queue's
 * done_lock so the entry cannot be removed while its state is read.
 */
static int get_poll_flags(void *instance)
{
	struct msm_vidc_inst *inst = instance;
	struct vb2_queue *outq = &inst->bufq[OUTPUT_PORT].vb2_bufq;
	struct vb2_queue *capq = &inst->bufq[CAPTURE_PORT].vb2_bufq;
	struct vb2_buffer *out_vb = NULL;
	struct vb2_buffer *cap_vb = NULL;
	unsigned long flags;
	int rc = 0;

	if (v4l2_event_pending(&inst->event_handler))
		rc |= POLLPRI;

	/* Capture queue: readable when its head buffer is finished. */
	spin_lock_irqsave(&capq->done_lock, flags);
	if (!list_empty(&capq->done_list))
		cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer,
								done_entry);
	if (cap_vb && (cap_vb->state == VB2_BUF_STATE_DONE
			|| cap_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&capq->done_lock, flags);

	/* Output queue: writable when its head buffer is finished. */
	spin_lock_irqsave(&outq->done_lock, flags);
	if (!list_empty(&outq->done_list))
		out_vb = list_first_entry(&outq->done_list, struct vb2_buffer,
								done_entry);
	if (out_vb && (out_vb->state == VB2_BUF_STATE_DONE
			|| out_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&outq->done_lock, flags);

	return rc;
}
61
62int msm_vidc_poll(void *instance, struct file *filp,
63 struct poll_table_struct *wait)
64{
65 struct msm_vidc_inst *inst = instance;
66 struct vb2_queue *outq = NULL;
67 struct vb2_queue *capq = NULL;
68
69 if (!inst)
70 return -EINVAL;
71
72 outq = &inst->bufq[OUTPUT_PORT].vb2_bufq;
73 capq = &inst->bufq[CAPTURE_PORT].vb2_bufq;
74
75 poll_wait(filp, &inst->event_handler.wait, wait);
76 poll_wait(filp, &capq->done_wq, wait);
77 poll_wait(filp, &outq->done_wq, wait);
78 return get_poll_flags(inst);
79}
80EXPORT_SYMBOL(msm_vidc_poll);
81
82int msm_vidc_querycap(void *instance, struct v4l2_capability *cap)
83{
84 struct msm_vidc_inst *inst = instance;
85
86 if (!inst || !cap)
87 return -EINVAL;
88
89 if (inst->session_type == MSM_VIDC_DECODER)
90 return msm_vdec_querycap(instance, cap);
91 else if (inst->session_type == MSM_VIDC_ENCODER)
92 return msm_venc_querycap(instance, cap);
93 return -EINVAL;
94}
95EXPORT_SYMBOL(msm_vidc_querycap);
96
97int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
98{
99 struct msm_vidc_inst *inst = instance;
100
101 if (!inst || !f)
102 return -EINVAL;
103
104 if (inst->session_type == MSM_VIDC_DECODER)
105 return msm_vdec_enum_fmt(instance, f);
106 else if (inst->session_type == MSM_VIDC_ENCODER)
107 return msm_venc_enum_fmt(instance, f);
108 return -EINVAL;
109}
110EXPORT_SYMBOL(msm_vidc_enum_fmt);
111
112int msm_vidc_s_fmt(void *instance, struct v4l2_format *f)
113{
114 struct msm_vidc_inst *inst = instance;
115
116 if (!inst || !f)
117 return -EINVAL;
118
119 if (inst->session_type == MSM_VIDC_DECODER)
120 return msm_vdec_s_fmt(instance, f);
121 if (inst->session_type == MSM_VIDC_ENCODER)
122 return msm_venc_s_fmt(instance, f);
123 return -EINVAL;
124}
125EXPORT_SYMBOL(msm_vidc_s_fmt);
126
127int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
128{
129 struct msm_vidc_inst *inst = instance;
130
131 if (!inst || !f)
132 return -EINVAL;
133
134 if (inst->session_type == MSM_VIDC_DECODER)
135 return msm_vdec_g_fmt(instance, f);
136 else if (inst->session_type == MSM_VIDC_ENCODER)
137 return msm_venc_g_fmt(instance, f);
138 return -EINVAL;
139}
140EXPORT_SYMBOL(msm_vidc_g_fmt);
141
142int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
143{
144 struct msm_vidc_inst *inst = instance;
145
146 if (!inst || !control)
147 return -EINVAL;
148
149 return msm_comm_s_ctrl(instance, control);
150}
151EXPORT_SYMBOL(msm_vidc_s_ctrl);
152
153int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
154{
155 struct msm_vidc_inst *inst = instance;
156
157 if (!inst || !control)
158 return -EINVAL;
159
160 return msm_comm_g_ctrl(instance, control);
161}
162EXPORT_SYMBOL(msm_vidc_g_ctrl);
163
164int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
165{
166 struct msm_vidc_inst *inst = instance;
167
168 if (!inst || !control)
169 return -EINVAL;
170
171 if (inst->session_type == MSM_VIDC_DECODER)
172 return msm_vdec_s_ext_ctrl(instance, control);
173 if (inst->session_type == MSM_VIDC_ENCODER)
174 return msm_venc_s_ext_ctrl(instance, control);
175 return -EINVAL;
176}
177EXPORT_SYMBOL(msm_vidc_s_ext_ctrl);
178
179int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b)
180{
181 struct msm_vidc_inst *inst = instance;
182
183 if (!inst || !b)
184 return -EINVAL;
185
186 if (inst->session_type == MSM_VIDC_DECODER)
187 return msm_vdec_reqbufs(instance, b);
188 if (inst->session_type == MSM_VIDC_ENCODER)
189 return msm_venc_reqbufs(instance, b);
190 return -EINVAL;
191}
192EXPORT_SYMBOL(msm_vidc_reqbufs);
193
/*
 * Find an already-registered buffer whose memory overlaps plane @idx
 * of @b.  A hit requires the same underlying ion buffer (or the same
 * device address) AND an offset/size containment or overlap.  On a
 * hit, *plane is set to the matching plane index and the buffer_info
 * is returned; otherwise NULL.
 *
 * Caller must hold inst->registeredbufs.lock.
 */
struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
			struct v4l2_buffer *b, int idx, int *plane)
{
	struct buffer_info *temp;
	struct buffer_info *ret = NULL;
	int i;
	int fd = b->m.planes[idx].reserved[0];
	u32 buff_off = b->m.planes[idx].reserved[1];
	u32 size = b->m.planes[idx].length;
	ion_phys_addr_t device_addr = b->m.planes[idx].m.userptr;

	if (fd < 0 || !plane) {
		dprintk(VIDC_ERR, "Invalid input\n");
		goto err_invalid_input;
	}

	WARN(!mutex_is_locked(&inst->registeredbufs.lock),
		"Registered buf lock is not acquired for %s", __func__);

	*plane = 0;
	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
			/* Same underlying ion buffer as @fd? */
			bool ion_hndl_matches = temp->handle[i] ?
				msm_smem_compare_buffers(inst->mem_client, fd,
				temp->handle[i]->smem_priv) : false;
			bool device_addr_matches = device_addr ==
				temp->device_addr[i];
			/* One region fully inside the other ... */
			bool contains_within = CONTAINS(temp->buff_off[i],
				temp->size[i], buff_off) ||
				CONTAINS(buff_off, size, temp->buff_off[i]);
			/* ... or the two regions partially overlapping. */
			bool overlaps = OVERLAPS(buff_off, size,
					temp->buff_off[i], temp->size[i]);

			if (!temp->inactive &&
				(ion_hndl_matches || device_addr_matches) &&
				(contains_within || overlaps)) {
				dprintk(VIDC_DBG,
					"This memory region is already mapped\n");
				ret = temp;
				*plane = i;
				break;
			}
		}
		if (ret)
			break;
	}

err_invalid_input:
	return ret;
}
244
/*
 * Search the registered list for an active mapping of the ion fd @fd.
 * On a hit, bump that plane's same_fd_ref count (the caller is about
 * to share the mapping instead of creating a new one) and return the
 * smem handle; otherwise return NULL.
 */
static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
{
	struct buffer_info *temp;
	struct msm_smem *same_fd_handle = NULL;

	int i;

	/* fd 0 means "no buffer" here; nothing to share. */
	if (!fd)
		return NULL;

	if (!inst || fd < 0) {
		dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
		goto err_invalid_input;
	}

	mutex_lock(&inst->registeredbufs.lock);
	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
			bool ion_hndl_matches = temp->handle[i] ?
				msm_smem_compare_buffers(inst->mem_client, fd,
				temp->handle[i]->smem_priv) : false;
			if (ion_hndl_matches && temp->mapped[i]) {
				temp->same_fd_ref[i]++;
				dprintk(VIDC_INFO,
					"Found same fd buffer\n");
				same_fd_handle = temp->handle[i];
				break;
			}
		}
		if (same_fd_handle)
			break;
	}
	mutex_unlock(&inst->registeredbufs.lock);

err_invalid_input:
	return same_fd_handle;
}
282
283struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
284 ion_phys_addr_t device_addr)
285{
286 struct buffer_info *temp = NULL;
287 bool found = false;
288 int i;
289
290 if (!buf_list || !device_addr) {
291 dprintk(VIDC_ERR,
292 "Invalid input- device_addr: %pa buf_list: %pK\n",
293 &device_addr, buf_list);
294 goto err_invalid_input;
295 }
296
297 mutex_lock(&buf_list->lock);
298 list_for_each_entry(temp, &buf_list->list, list) {
299 for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
300 if (!temp->inactive &&
301 temp->device_addr[i] == device_addr) {
302 dprintk(VIDC_INFO,
303 "Found same fd buffer\n");
304 found = true;
305 break;
306 }
307 }
308
309 if (found)
310 break;
311 }
312 mutex_unlock(&buf_list->lock);
313
314err_invalid_input:
315 return temp;
316}
317
318static inline void populate_buf_info(struct buffer_info *binfo,
319 struct v4l2_buffer *b, u32 i)
320{
321 if (i >= VIDEO_MAX_PLANES) {
322 dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
323 return;
324 }
325 binfo->type = b->type;
326 binfo->fd[i] = b->m.planes[i].reserved[0];
327 binfo->buff_off[i] = b->m.planes[i].reserved[1];
328 binfo->size[i] = b->m.planes[i].length;
329 binfo->uvaddr[i] = b->m.planes[i].m.userptr;
330 binfo->num_planes = b->length;
331 binfo->memory = b->memory;
332 binfo->v4l2_index = b->index;
333 binfo->timestamp.tv_sec = b->timestamp.tv_sec;
334 binfo->timestamp.tv_usec = b->timestamp.tv_usec;
335 dprintk(VIDC_DBG, "%s: fd[%d] = %d b->index = %d",
336 __func__, i, binfo->fd[0], b->index);
337}
338
/*
 * Rebuild @b from the saved bookkeeping in @binfo so a previously
 * held buffer can be re-queued to firmware.  Plane userptr fields
 * carry device addresses.  Clears binfo->dequeued because the driver
 * owns the buffer again.
 */
static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b,
					struct buffer_info *binfo)
{
	int i = 0;

	b->type = binfo->type;
	b->length = binfo->num_planes;
	b->memory = binfo->memory;
	b->index = binfo->v4l2_index;
	b->timestamp.tv_sec = binfo->timestamp.tv_sec;
	b->timestamp.tv_usec = binfo->timestamp.tv_usec;
	binfo->dequeued = false;
	for (i = 0; i < binfo->num_planes; ++i) {
		b->m.planes[i].reserved[0] = binfo->fd[i];
		b->m.planes[i].reserved[1] = binfo->buff_off[i];
		b->m.planes[i].length = binfo->size[i];
		b->m.planes[i].m.userptr = binfo->device_addr[i];
		dprintk(VIDC_DBG, "%s %d %d %d %pa\n", __func__, binfo->fd[i],
				binfo->buff_off[i], binfo->size[i],
				&binfo->device_addr[i]);
	}
}
361
362static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
363 struct v4l2_plane *p, enum hal_buffer buffer_type)
364{
365 struct msm_smem *handle = NULL;
366
367 handle = msm_comm_smem_user_to_kernel(inst,
368 p->reserved[0],
369 p->reserved[1],
370 buffer_type);
371 if (!handle) {
372 dprintk(VIDC_ERR,
373 "%s: Failed to get device buffer address\n", __func__);
374 return NULL;
375 }
376 return handle;
377}
378
379static inline enum hal_buffer get_hal_buffer_type(
380 struct msm_vidc_inst *inst, struct v4l2_buffer *b)
381{
382 if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
383 return HAL_BUFFER_INPUT;
384 else if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
385 return HAL_BUFFER_OUTPUT;
386 else
387 return -EINVAL;
388}
389
/*
 * True when @b is a capture-port buffer and the instance's capture
 * port is configured for dynamic buffer mode (client-owned buffers
 * never registered with firmware).
 */
static inline bool is_dynamic_output_buffer_mode(struct v4l2_buffer *b,
				struct msm_vidc_inst *inst)
{
	return b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
		inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC;
}
396
397
/*
 * True when @b is an output-port (i.e. encoder input) buffer on an
 * encoder session.
 */
static inline bool is_encoder_input_buffer(struct v4l2_buffer *b,
				struct msm_vidc_inst *inst)
{
	return b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
		inst->session_type == MSM_VIDC_ENCODER;
}
404
/*
 * Save every plane of @b into @binfo, skipping a zero-length
 * extradata plane (there is nothing to record for it).
 */
static inline void save_v4l2_buffer(struct v4l2_buffer *b,
				struct buffer_info *binfo)
{
	int i = 0;

	for (i = 0; i < b->length; ++i) {
		/* Skip an empty extradata plane. */
		if (EXTRADATA_IDX(b->length) &&
			(i == EXTRADATA_IDX(b->length)) &&
			!b->m.planes[i].length) {
			continue;
		}
		populate_buf_info(binfo, b, i);
	}
}
419
420int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
421{
422 struct buffer_info *binfo = NULL;
423 struct buffer_info *temp = NULL, *iterator = NULL;
424 int plane = 0;
425 int i = 0, rc = 0;
426 struct msm_smem *same_fd_handle = NULL;
427
428 if (!b || !inst) {
429 dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
430 return -EINVAL;
431 }
432
433 binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
434 if (!binfo) {
435 dprintk(VIDC_ERR, "Out of memory\n");
436 rc = -ENOMEM;
437 goto exit;
438 }
439 if (b->length > VIDEO_MAX_PLANES) {
440 dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n",
441 b->length, VIDEO_MAX_PLANES);
442 rc = -EINVAL;
443 goto exit;
444 }
445
446 dprintk(VIDC_DBG, "[MAP] Create binfo = %pK fd = %d type = %d\n",
447 binfo, b->m.planes[0].reserved[0], b->type);
448
449 for (i = 0; i < b->length; ++i) {
450 rc = 0;
451 if (EXTRADATA_IDX(b->length) &&
452 (i == EXTRADATA_IDX(b->length)) &&
453 !b->m.planes[i].length) {
454 continue;
455 }
456 mutex_lock(&inst->registeredbufs.lock);
457 temp = get_registered_buf(inst, b, i, &plane);
458 if (temp && !is_dynamic_output_buffer_mode(b, inst)) {
459 dprintk(VIDC_DBG,
460 "This memory region has already been prepared\n");
461 rc = 0;
462 mutex_unlock(&inst->registeredbufs.lock);
463 goto exit;
464 }
465
466 if (temp && is_dynamic_output_buffer_mode(b, inst) && !i) {
467 /*
468 * Buffer is already present in registered list
469 * increment ref_count, populate new values of v4l2
470 * buffer in existing buffer_info struct.
471 *
472 * We will use the saved buffer info and queue it when
473 * we receive RELEASE_BUFFER_REFERENCE EVENT from f/w.
474 */
475 dprintk(VIDC_DBG, "[MAP] Buffer already prepared\n");
476 temp->inactive = false;
477 list_for_each_entry(iterator,
478 &inst->registeredbufs.list, list) {
479 if (iterator == temp) {
480 rc = buf_ref_get(inst, temp);
481 save_v4l2_buffer(b, temp);
482 break;
483 }
484 }
485 }
486 mutex_unlock(&inst->registeredbufs.lock);
487 /*
488 * rc == 1,
489 * buffer is mapped, fw has released all reference, so skip
490 * mapping and queue it immediately.
491 *
492 * rc == 2,
493 * buffer is mapped and fw is holding a reference, hold it in
494 * the driver and queue it later when fw has released
495 */
496 if (rc == 1) {
497 rc = 0;
498 goto exit;
499 } else if (rc == 2) {
500 rc = -EEXIST;
501 goto exit;
502 }
503
504 same_fd_handle = get_same_fd_buffer(
505 inst, b->m.planes[i].reserved[0]);
506
507 populate_buf_info(binfo, b, i);
508 if (same_fd_handle) {
509 binfo->device_addr[i] =
510 same_fd_handle->device_addr + binfo->buff_off[i];
511 b->m.planes[i].m.userptr = binfo->device_addr[i];
512 binfo->mapped[i] = false;
513 binfo->handle[i] = same_fd_handle;
514 } else {
515 binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
516 get_hal_buffer_type(inst, b));
517 if (!binfo->handle[i]) {
518 rc = -EINVAL;
519 goto exit;
520 }
521
522 binfo->mapped[i] = true;
523 binfo->device_addr[i] = binfo->handle[i]->device_addr +
524 binfo->buff_off[i];
525 b->m.planes[i].m.userptr = binfo->device_addr[i];
526 }
527
528 /* We maintain one ref count for all planes*/
529 if (!i && is_dynamic_output_buffer_mode(b, inst)) {
530 rc = buf_ref_get(inst, binfo);
531 if (rc < 0)
532 goto exit;
533 }
534 dprintk(VIDC_DBG,
535 "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
536 __func__, binfo, i, binfo->handle[i],
537 &binfo->device_addr[i], binfo->fd[i],
538 binfo->buff_off[i], binfo->mapped[i]);
539 }
540
541 mutex_lock(&inst->registeredbufs.lock);
542 list_add_tail(&binfo->list, &inst->registeredbufs.list);
543 mutex_unlock(&inst->registeredbufs.lock);
544 return 0;
545
546exit:
547 kfree(binfo);
548 return rc;
549}
/*
 * Drop the driver-side mapping for @binfo and remove it from the
 * registered-buffer list, but only once the client has dequeued the
 * buffer AND it is marked pending_deletion.  Planes still shared with
 * other buffers (same_fd_ref) keep the node alive, marked inactive.
 *
 * Caller must hold inst->registeredbufs.lock.  Returns 0 except on
 * invalid arguments.
 */
int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
			struct buffer_info *binfo)
{
	int i = 0;
	struct buffer_info *temp = NULL;
	bool found = false, keep_node = false;

	if (!inst || !binfo) {
		dprintk(VIDC_ERR, "%s invalid param: %pK %pK\n",
			__func__, inst, binfo);
		return -EINVAL;
	}

	WARN(!mutex_is_locked(&inst->registeredbufs.lock),
		"Registered buf lock is not acquired for %s", __func__);

	/*
	 * Make sure the buffer to be unmapped and deleted
	 * from the registered list is present in the list.
	 */
	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
		if (temp == binfo) {
			found = true;
			break;
		}
	}

	/*
	 * Free the buffer info only if
	 * - buffer info has not been deleted from registered list
	 * - vidc client has called dqbuf on the buffer
	 * - no references are held on the buffer
	 */
	if (!found || !temp || !temp->pending_deletion || !temp->dequeued)
		goto exit;

	for (i = 0; i < temp->num_planes; i++) {
		dprintk(VIDC_DBG,
			"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
			__func__, temp, i, temp->handle[i],
			&temp->device_addr[i], temp->fd[i],
			temp->buff_off[i], temp->mapped[i]);
		/*
		 * Unmap the handle only if the buffer has been mapped and no
		 * other buffer has a reference to this buffer.
		 * In case of buffers with same fd, we will map the buffer only
		 * once and subsequent buffers will refer to the mapped buffer's
		 * device address.
		 * For buffers which share the same fd, do not unmap and keep
		 * the buffer info in registered list.
		 */
		if (temp->handle[i] && temp->mapped[i] &&
			!temp->same_fd_ref[i]) {
			msm_comm_smem_free(inst,
				temp->handle[i]);
		}

		if (temp->same_fd_ref[i])
			keep_node = true;
		else {
			temp->fd[i] = 0;
			temp->handle[i] = 0;
			temp->device_addr[i] = 0;
			temp->uvaddr[i] = 0;
		}
	}
	if (!keep_node) {
		dprintk(VIDC_DBG, "[UNMAP] AND-FREED binfo: %pK\n", temp);
		list_del(&temp->list);
		kfree(temp);
	} else {
		/* Some plane is still shared: keep the node but retire it. */
		temp->inactive = true;
		dprintk(VIDC_DBG, "[UNMAP] NOT-FREED binfo: %pK\n", temp);
	}
exit:
	return 0;
}
627
628
629int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
630 struct buffer_info *binfo)
631{
632 struct v4l2_buffer b = {0};
633 struct v4l2_plane plane[VIDEO_MAX_PLANES] = { {0} };
634
635 if (!binfo) {
636 dprintk(VIDC_ERR, "%s invalid param: %pK\n", __func__, binfo);
637 return -EINVAL;
638 }
639 dprintk(VIDC_DBG, "%s fd[0] = %d\n", __func__, binfo->fd[0]);
640
641 b.m.planes = plane;
642 repopulate_v4l2_buffer(&b, binfo);
643
644 if (inst->session_type == MSM_VIDC_DECODER)
645 return msm_vdec_qbuf(inst, &b);
646 if (inst->session_type == MSM_VIDC_ENCODER)
647 return msm_venc_qbuf(inst, &b);
648
649 return -EINVAL;
650}
651
652int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
653 struct buffer_info *binfo)
654{
655 int i = 0;
656 int rc = 0;
657
658 if (!inst) {
659 dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
660 return -EINVAL;
661 }
662
663 if (!binfo) {
664 dprintk(VIDC_ERR, "%s: invalid buffer info: %pK\n",
665 __func__, inst);
666 return -EINVAL;
667 }
668
669 if (binfo->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
670 return 0;
671
672
673 for (i = 0; i < binfo->num_planes; i++) {
674 if (binfo->handle[i]) {
675 rc = msm_comm_smem_cache_operations(inst,
676 binfo->handle[i], SMEM_CACHE_INVALIDATE);
677 if (rc) {
678 dprintk(VIDC_ERR,
679 "%s: Failed to clean caches: %d\n",
680 __func__, rc);
681 return -EINVAL;
682 }
683 } else
684 dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n",
685 __func__, i);
686 }
687 return 0;
688}
689
690static bool valid_v4l2_buffer(struct v4l2_buffer *b,
691 struct msm_vidc_inst *inst) {
692 enum vidc_ports port =
693 !V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM :
694 b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? CAPTURE_PORT :
695 b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? OUTPUT_PORT :
696 MAX_PORT_NUM;
697
698 return port != MAX_PORT_NUM &&
699 inst->fmts[port].num_planes == b->length;
700}
701
702int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
703{
704 struct msm_vidc_inst *inst = instance;
705
706 if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
707 return -EINVAL;
708
709 if (inst->state == MSM_VIDC_CORE_INVALID ||
710 inst->core->state == VIDC_CORE_INVALID)
711 return -EINVAL;
712
713 if (is_dynamic_output_buffer_mode(b, inst))
714 return 0;
715
716 if (map_and_register_buf(inst, b))
717 return -EINVAL;
718
719 if (inst->session_type == MSM_VIDC_DECODER)
720 return msm_vdec_prepare_buf(instance, b);
721 if (inst->session_type == MSM_VIDC_ENCODER)
722 return msm_venc_prepare_buf(instance, b);
723 return -EINVAL;
724}
725EXPORT_SYMBOL(msm_vidc_prepare_buf);
726
/*
 * Release all registered buffers of @buffer_type: first ask firmware
 * to release each buffer (skipped for dynamic-mode capture buffers,
 * which firmware never owned), then unmap the driver's smem handles
 * and free the bookkeeping nodes.
 */
int msm_vidc_release_buffers(void *instance, int buffer_type)
{
	struct msm_vidc_inst *inst = instance;
	struct buffer_info *bi, *dummy;
	struct v4l2_buffer buffer_info;
	struct v4l2_plane plane[VIDEO_MAX_PLANES];
	int i, rc = 0;

	if (!inst)
		return -EINVAL;

	/* Move the session out of streaming before releasing buffers. */
	if (!inst->in_reconfig &&
		inst->state > MSM_VIDC_LOAD_RESOURCES &&
		inst->state < MSM_VIDC_RELEASE_RESOURCES_DONE) {
		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
		if (rc) {
			dprintk(VIDC_ERR,
				"Failed to move inst: %pK to release res done\n",
				inst);
		}
	}

	/*
	 * In dynamic buffer mode, driver needs to release resources,
	 * but not call release buffers on firmware, as the buffers
	 * were never registered with firmware.
	 */
	if (buffer_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
		inst->buffer_mode_set[CAPTURE_PORT] ==
			HAL_BUFFER_MODE_DYNAMIC) {
		goto free_and_unmap;
	}

	mutex_lock(&inst->registeredbufs.lock);
	list_for_each_entry(bi, &inst->registeredbufs.list, list) {
		bool release_buf = false;

		if (bi->type == buffer_type) {
			/* Rebuild a v4l2_buffer for the firmware release. */
			buffer_info.type = bi->type;
			for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
					i++) {
				plane[i].reserved[0] = bi->fd[i];
				plane[i].reserved[1] = bi->buff_off[i];
				plane[i].length = bi->size[i];
				plane[i].m.userptr = bi->device_addr[i];
				buffer_info.m.planes = plane;
				dprintk(VIDC_DBG,
					"Releasing buffer: %d, %d, %d\n",
					buffer_info.m.planes[i].reserved[0],
					buffer_info.m.planes[i].reserved[1],
					buffer_info.m.planes[i].length);
			}
			buffer_info.length = bi->num_planes;
			release_buf = true;
		}

		if (!release_buf)
			continue;
		if (inst->session_type == MSM_VIDC_DECODER)
			rc = msm_vdec_release_buf(instance,
				&buffer_info);
		if (inst->session_type == MSM_VIDC_ENCODER)
			rc = msm_venc_release_buf(instance,
				&buffer_info);
		if (rc)
			dprintk(VIDC_ERR,
				"Failed Release buffer: %d, %d, %d\n",
				buffer_info.m.planes[0].reserved[0],
				buffer_info.m.planes[0].reserved[1],
				buffer_info.m.planes[0].length);
	}
	mutex_unlock(&inst->registeredbufs.lock);

free_and_unmap:
	/* Unmap our smem handles and drop the bookkeeping nodes. */
	mutex_lock(&inst->registeredbufs.lock);
	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
		if (bi->type == buffer_type) {
			list_del(&bi->list);
			for (i = 0; i < bi->num_planes; i++) {
				if (bi->handle[i] && bi->mapped[i]) {
					dprintk(VIDC_DBG,
						"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
						__func__, bi, i, bi->handle[i],
						&bi->device_addr[i], bi->fd[i],
						bi->buff_off[i], bi->mapped[i]);
					msm_comm_smem_free(inst,
							bi->handle[i]);
				}
			}
			kfree(bi);
		}
	}
	mutex_unlock(&inst->registeredbufs.lock);
	return rc;
}
EXPORT_SYMBOL(msm_vidc_release_buffers);
823
/*
 * VIDIOC_QBUF: map/register the buffer if needed, translate each
 * plane's userptr to its device address, perform the required cache
 * maintenance, and queue to the dec/enc implementation.
 *
 * -EEXIST from map_and_register_buf means firmware still holds a
 * reference to this dynamic buffer: the driver keeps it and queues it
 * later, so 0 is returned to the client.
 */
int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
{
	struct msm_vidc_inst *inst = instance;
	struct buffer_info *binfo;
	int plane = 0;
	int rc = 0;
	int i;

	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
		return -EINVAL;

	if (inst->state == MSM_VIDC_CORE_INVALID ||
		inst->core->state == VIDC_CORE_INVALID)
		return -EINVAL;

	rc = map_and_register_buf(inst, b);
	if (rc == -EEXIST) {
		/* Don't hold buffers in the driver while a flush runs. */
		if (atomic_read(&inst->in_flush) &&
			is_dynamic_output_buffer_mode(b, inst)) {
			dprintk(VIDC_ERR,
				"Flush in progress, do not hold any buffers in driver\n");
			msm_comm_flush_dynamic_buffers(inst);
		}
		return 0;
	}
	if (rc)
		return rc;

	for (i = 0; i < b->length; ++i) {
		/* A zero-length extradata plane carries no address. */
		if (EXTRADATA_IDX(b->length) &&
			(i == EXTRADATA_IDX(b->length)) &&
			!b->m.planes[i].length) {
			b->m.planes[i].m.userptr = 0;
			continue;
		}
		mutex_lock(&inst->registeredbufs.lock);
		binfo = get_registered_buf(inst, b, i, &plane);
		mutex_unlock(&inst->registeredbufs.lock);
		if (!binfo) {
			dprintk(VIDC_ERR,
				"This buffer is not registered: %d, %d, %d\n",
				b->m.planes[i].reserved[0],
				b->m.planes[i].reserved[1],
				b->m.planes[i].length);
			goto err_invalid_buff;
		}
		/* Firmware consumes device addresses, not user pointers. */
		b->m.planes[i].m.userptr = binfo->device_addr[i];
		dprintk(VIDC_DBG, "Queueing device address = %pa\n",
				&binfo->device_addr[i]);

		if (inst->fmts[OUTPUT_PORT].fourcc ==
			V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] &&
			b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
			rc = msm_comm_smem_cache_operations(inst,
				binfo->handle[i], SMEM_CACHE_INVALIDATE);
			if (rc) {
				dprintk(VIDC_ERR,
					"Failed to inv caches: %d\n", rc);
				goto err_invalid_buff;
			}
		}

		/* Clean input buffers so hardware sees the client's data. */
		if (binfo->handle[i] &&
			(b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
			rc = msm_comm_smem_cache_operations(inst,
					binfo->handle[i], SMEM_CACHE_CLEAN);
			if (rc) {
				dprintk(VIDC_ERR,
					"Failed to clean caches: %d\n", rc);
				goto err_invalid_buff;
			}
		}
	}

	if (inst->session_type == MSM_VIDC_DECODER)
		return msm_vdec_qbuf(instance, b);
	if (inst->session_type == MSM_VIDC_ENCODER)
		return msm_venc_qbuf(instance, b);

err_invalid_buff:
	/*
	 * NOTE(review): mappings created by map_and_register_buf above
	 * are not unwound on this path — confirm they are reclaimed via
	 * msm_vidc_release_buffers/close.
	 */
	return -EINVAL;
}
EXPORT_SYMBOL(msm_vidc_qbuf);
907
/*
 * VIDIOC_DQBUF: dequeue from the dec/enc implementation, then map
 * each plane's device address back to the client's fd/offset/uvaddr.
 * For dynamic-mode capture buffers the buffer is marked dequeued and
 * the driver attempts to unmap/deregister it (freed once firmware has
 * dropped its reference).
 */
int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
{
	struct msm_vidc_inst *inst = instance;
	struct buffer_info *buffer_info = NULL;
	int i = 0, rc = 0;

	if (!inst || !b || !valid_v4l2_buffer(b, inst))
		return -EINVAL;

	if (inst->session_type == MSM_VIDC_DECODER)
		rc = msm_vdec_dqbuf(instance, b);
	if (inst->session_type == MSM_VIDC_ENCODER)
		rc = msm_venc_dqbuf(instance, b);

	if (rc)
		return rc;

	/* Walk planes in reverse; extradata planes carry no address. */
	for (i = b->length - 1; i >= 0 ; i--) {
		if (EXTRADATA_IDX(b->length) &&
			i == EXTRADATA_IDX(b->length)) {
			continue;
		}
		buffer_info = device_to_uvaddr(&inst->registeredbufs,
			b->m.planes[i].m.userptr);

		if (!buffer_info) {
			dprintk(VIDC_ERR,
				"%s no buffer info registered for buffer addr: %#lx\n",
				__func__, b->m.planes[i].m.userptr);
			return -EINVAL;
		}

		/* Restore the client-visible view of the plane. */
		b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
		b->m.planes[i].reserved[0] = buffer_info->fd[i];
		b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
		/* Secure sessions legitimately have no user mapping. */
		if (!(inst->flags & VIDC_SECURE) && !b->m.planes[i].m.userptr) {
			dprintk(VIDC_ERR,
				"%s: Failed to find user virtual address, %#lx, %d, %d\n",
				__func__, b->m.planes[i].m.userptr, b->type, i);
			return -EINVAL;
		}
	}

	if (!buffer_info) {
		dprintk(VIDC_ERR,
			"%s: error - no buffer info found in registered list\n",
			__func__);
		return -EINVAL;
	}

	rc = output_buffer_cache_invalidate(inst, buffer_info);
	if (rc)
		return rc;

	if (is_dynamic_output_buffer_mode(b, inst)) {
		buffer_info->dequeued = true;

		dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
			buffer_info->fd[0]);
		mutex_lock(&inst->registeredbufs.lock);
		rc = unmap_and_deregister_buf(inst, buffer_info);
		mutex_unlock(&inst->registeredbufs.lock);
	}

	return rc;
}
EXPORT_SYMBOL(msm_vidc_dqbuf);
975
976int msm_vidc_streamon(void *instance, enum v4l2_buf_type i)
977{
978 struct msm_vidc_inst *inst = instance;
979
980 if (!inst)
981 return -EINVAL;
982
983 if (inst->session_type == MSM_VIDC_DECODER)
984 return msm_vdec_streamon(instance, i);
985 if (inst->session_type == MSM_VIDC_ENCODER)
986 return msm_venc_streamon(instance, i);
987 return -EINVAL;
988}
989EXPORT_SYMBOL(msm_vidc_streamon);
990
991int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i)
992{
993 struct msm_vidc_inst *inst = instance;
994
995 if (!inst)
996 return -EINVAL;
997
998 if (inst->session_type == MSM_VIDC_DECODER)
999 return msm_vdec_streamoff(instance, i);
1000 if (inst->session_type == MSM_VIDC_ENCODER)
1001 return msm_venc_streamoff(instance, i);
1002 return -EINVAL;
1003}
1004EXPORT_SYMBOL(msm_vidc_streamoff);
1005
1006int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
1007{
1008 struct msm_vidc_inst *inst = instance;
1009 struct msm_vidc_capability *capability = NULL;
1010
1011 if (!inst || !fsize) {
1012 dprintk(VIDC_ERR, "%s: invalid parameter: %pK %pK\n",
1013 __func__, inst, fsize);
1014 return -EINVAL;
1015 }
1016 if (!inst->core)
1017 return -EINVAL;
1018
1019 capability = &inst->capability;
1020 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1021 fsize->stepwise.min_width = capability->width.min;
1022 fsize->stepwise.max_width = capability->width.max;
1023 fsize->stepwise.step_width = capability->width.step_size;
1024 fsize->stepwise.min_height = capability->height.min;
1025 fsize->stepwise.max_height = capability->height.max;
1026 fsize->stepwise.step_height = capability->height.step_size;
1027 return 0;
1028}
1029EXPORT_SYMBOL(msm_vidc_enum_framesizes);
1030
/*
 * Stub vb2 memory ops: buffer memory is managed by the driver itself
 * (ion/smem), so vb2 only needs non-NULL callbacks.  The 0xdeadbeef
 * cookie is an arbitrary non-NULL token so vb2 treats the userptr
 * acquisition as successful; it is never dereferenced.
 */
static void *vidc_get_userptr(struct device *dev, unsigned long vaddr,
			unsigned long size, enum dma_data_direction dma_dir)
{
	return (void *)0xdeadbeef;
}

/* Nothing to release: vidc_get_userptr allocates nothing. */
static void vidc_put_userptr(void *buf_priv)
{

}
static const struct vb2_mem_ops msm_vidc_vb2_mem_ops = {
	.get_userptr = vidc_get_userptr,
	.put_userptr = vidc_put_userptr,
};
1045
/*
 * Initialize the vb2 queue backing the capture or output port of
 * @inst, wiring in the dec/enc queue ops for the session type and the
 * driver's stub memory ops.  Returns the result of vb2_queue_init().
 */
static inline int vb2_bufq_init(struct msm_vidc_inst *inst,
		enum v4l2_buf_type type, enum session_type sess)
{
	struct vb2_queue *q = NULL;

	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		q = &inst->bufq[CAPTURE_PORT].vb2_bufq;
	} else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		q = &inst->bufq[OUTPUT_PORT].vb2_bufq;
	} else {
		dprintk(VIDC_ERR, "buf_type = %d not recognised\n", type);
		return -EINVAL;
	}

	q->type = type;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	if (sess == MSM_VIDC_DECODER)
		q->ops = msm_vdec_get_vb2q_ops();
	else if (sess == MSM_VIDC_ENCODER)
		q->ops = msm_venc_get_vb2q_ops();
	q->mem_ops = &msm_vidc_vb2_mem_ops;
	q->drv_priv = inst;
	return vb2_queue_init(q);
}
1072
1073static int setup_event_queue(void *inst,
1074 struct video_device *pvdev)
1075{
1076 int rc = 0;
1077 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1078
1079 v4l2_fh_init(&vidc_inst->event_handler, pvdev);
1080 v4l2_fh_add(&vidc_inst->event_handler);
1081
1082 return rc;
1083}
1084
1085int msm_vidc_subscribe_event(void *inst,
1086 const struct v4l2_event_subscription *sub)
1087{
1088 int rc = 0;
1089 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1090
1091 if (!inst || !sub)
1092 return -EINVAL;
1093
1094 rc = v4l2_event_subscribe(&vidc_inst->event_handler,
1095 sub, MAX_EVENTS, NULL);
1096 return rc;
1097}
1098EXPORT_SYMBOL(msm_vidc_subscribe_event);
1099
1100int msm_vidc_unsubscribe_event(void *inst,
1101 const struct v4l2_event_subscription *sub)
1102{
1103 int rc = 0;
1104 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1105
1106 if (!inst || !sub)
1107 return -EINVAL;
1108
1109 rc = v4l2_event_unsubscribe(&vidc_inst->event_handler, sub);
1110 return rc;
1111}
1112EXPORT_SYMBOL(msm_vidc_unsubscribe_event);
1113
1114int msm_vidc_dqevent(void *inst, struct v4l2_event *event)
1115{
1116 int rc = 0;
1117 struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
1118
1119 if (!inst || !event)
1120 return -EINVAL;
1121
1122 rc = v4l2_event_dequeue(&vidc_inst->event_handler, event, false);
1123 return rc;
1124}
1125EXPORT_SYMBOL(msm_vidc_dqevent);
1126
1127static bool msm_vidc_check_for_inst_overload(struct msm_vidc_core *core)
1128{
1129 u32 instance_count = 0;
1130 u32 secure_instance_count = 0;
1131 struct msm_vidc_inst *inst = NULL;
1132 bool overload = false;
1133
1134 mutex_lock(&core->lock);
1135 list_for_each_entry(inst, &core->instances, list) {
1136 instance_count++;
1137 /* This flag is not updated yet for the current instance */
1138 if (inst->flags & VIDC_SECURE)
1139 secure_instance_count++;
1140 }
1141 mutex_unlock(&core->lock);
1142
1143 /* Instance count includes current instance as well. */
1144
1145 if ((instance_count > core->resources.max_inst_count) ||
1146 (secure_instance_count > core->resources.max_secure_inst_count))
1147 overload = true;
1148 return overload;
1149}
1150
1151void *msm_vidc_open(int core_id, int session_type)
1152{
1153 struct msm_vidc_inst *inst = NULL;
1154 struct msm_vidc_core *core = NULL;
1155 int rc = 0;
1156 int i = 0;
1157
1158 if (core_id >= MSM_VIDC_CORES_MAX ||
1159 session_type >= MSM_VIDC_MAX_DEVICES) {
1160 dprintk(VIDC_ERR, "Invalid input, core_id = %d, session = %d\n",
1161 core_id, session_type);
1162 goto err_invalid_core;
1163 }
1164 core = get_vidc_core(core_id);
1165 if (!core) {
1166 dprintk(VIDC_ERR,
1167 "Failed to find core for core_id = %d\n", core_id);
1168 goto err_invalid_core;
1169 }
1170
1171 inst = kzalloc(sizeof(*inst), GFP_KERNEL);
1172 if (!inst) {
1173 dprintk(VIDC_ERR, "Failed to allocate memory\n");
1174 rc = -ENOMEM;
1175 goto err_invalid_core;
1176 }
1177
1178 pr_info(VIDC_DBG_TAG "Opening video instance: %pK, %d\n",
1179 VIDC_MSG_PRIO2STRING(VIDC_INFO), inst, session_type);
1180 mutex_init(&inst->sync_lock);
1181 mutex_init(&inst->bufq[CAPTURE_PORT].lock);
1182 mutex_init(&inst->bufq[OUTPUT_PORT].lock);
1183 mutex_init(&inst->lock);
1184
1185 INIT_MSM_VIDC_LIST(&inst->pendingq);
1186 INIT_MSM_VIDC_LIST(&inst->scratchbufs);
1187 INIT_MSM_VIDC_LIST(&inst->persistbufs);
1188 INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
1189 INIT_MSM_VIDC_LIST(&inst->outputbufs);
1190 INIT_MSM_VIDC_LIST(&inst->registeredbufs);
1191
1192 kref_init(&inst->kref);
1193
1194 inst->session_type = session_type;
1195 inst->state = MSM_VIDC_CORE_UNINIT_DONE;
1196 inst->core = core;
1197 inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
1198 inst->instant_bitrate = 0;
1199 inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
1200 inst->colour_space = MSM_VIDC_BT601_6_525;
1201
1202 for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
1203 i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
1204 init_completion(&inst->completions[i]);
1205 }
1206 inst->mem_client = msm_smem_new_client(SMEM_ION,
1207 &inst->core->resources, session_type);
1208 if (!inst->mem_client) {
1209 dprintk(VIDC_ERR, "Failed to create memory client\n");
1210 goto fail_mem_client;
1211 }
1212 if (session_type == MSM_VIDC_DECODER) {
1213 msm_vdec_inst_init(inst);
1214 rc = msm_vdec_ctrl_init(inst);
1215 } else if (session_type == MSM_VIDC_ENCODER) {
1216 msm_venc_inst_init(inst);
1217 rc = msm_venc_ctrl_init(inst);
1218 }
1219
1220 if (rc)
1221 goto fail_bufq_capture;
1222
1223 msm_dcvs_init(inst);
1224 rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
1225 session_type);
1226 if (rc) {
1227 dprintk(VIDC_ERR,
1228 "Failed to initialize vb2 queue on capture port\n");
1229 goto fail_bufq_capture;
1230 }
1231 rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
1232 session_type);
1233 if (rc) {
1234 dprintk(VIDC_ERR,
1235 "Failed to initialize vb2 queue on capture port\n");
1236 goto fail_bufq_output;
1237 }
1238
1239 setup_event_queue(inst, &core->vdev[session_type].vdev);
1240
1241 mutex_lock(&core->lock);
1242 list_add_tail(&inst->list, &core->instances);
1243 mutex_unlock(&core->lock);
1244
1245 rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
1246 if (rc) {
1247 dprintk(VIDC_ERR,
1248 "Failed to move video instance to init state\n");
1249 goto fail_init;
1250 }
1251
1252 if (msm_vidc_check_for_inst_overload(core)) {
1253 dprintk(VIDC_ERR,
1254 "Instance count reached Max limit, rejecting session");
1255 goto fail_init;
1256 }
1257
1258 inst->debugfs_root =
1259 msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
1260
1261 return inst;
1262fail_init:
1263 v4l2_fh_del(&inst->event_handler);
1264 v4l2_fh_exit(&inst->event_handler);
1265 vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
1266
1267 mutex_lock(&core->lock);
1268 list_del(&inst->list);
1269 mutex_unlock(&core->lock);
1270
1271fail_bufq_output:
1272 vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
1273fail_bufq_capture:
1274 msm_comm_ctrl_deinit(inst);
1275 msm_smem_delete_client(inst->mem_client);
1276fail_mem_client:
1277 kfree(inst);
1278 inst = NULL;
1279err_invalid_core:
1280 return inst;
1281}
1282EXPORT_SYMBOL(msm_vidc_open);
1283
1284static void cleanup_instance(struct msm_vidc_inst *inst)
1285{
1286 struct vb2_buf_entry *entry, *dummy;
1287
1288 if (inst) {
1289
1290 mutex_lock(&inst->pendingq.lock);
1291 list_for_each_entry_safe(entry, dummy, &inst->pendingq.list,
1292 list) {
1293 list_del(&entry->list);
1294 kfree(entry);
1295 }
1296 mutex_unlock(&inst->pendingq.lock);
1297
1298 if (msm_comm_release_scratch_buffers(inst, false)) {
1299 dprintk(VIDC_ERR,
1300 "Failed to release scratch buffers\n");
1301 }
1302
1303 if (msm_comm_release_persist_buffers(inst)) {
1304 dprintk(VIDC_ERR,
1305 "Failed to release persist buffers\n");
1306 }
1307
1308 if (msm_comm_release_output_buffers(inst)) {
1309 dprintk(VIDC_ERR,
1310 "Failed to release output buffers\n");
1311 }
1312
1313 if (inst->extradata_handle)
1314 msm_comm_smem_free(inst, inst->extradata_handle);
1315
1316 mutex_lock(&inst->pending_getpropq.lock);
1317 if (!list_empty(&inst->pending_getpropq.list)) {
1318 dprintk(VIDC_ERR,
1319 "pending_getpropq not empty\n");
1320 WARN_ON(VIDC_DBG_WARN_ENABLE);
1321 }
1322 mutex_unlock(&inst->pending_getpropq.lock);
1323 }
1324}
1325
/*
 * msm_vidc_destroy() - final teardown of a video instance.
 *
 * Invoked from the kref release callback once the last reference to
 * @inst is dropped (see close_helper in msm_vidc_close). Unlinks the
 * instance from the core, releases its V4L2/vb2 resources, destroys
 * its locks and frees the memory. The teardown order mirrors the
 * setup order in msm_vidc_open in reverse.
 *
 * Return: 0 on success, -EINVAL if @inst or its core pointer is NULL.
 */
int msm_vidc_destroy(struct msm_vidc_inst *inst)
{
	struct msm_vidc_core *core;
	int i = 0;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;

	mutex_lock(&core->lock);
	/* inst->list lives in core->instances */
	list_del(&inst->list);
	mutex_unlock(&core->lock);

	msm_comm_ctrl_deinit(inst);

	/* Remove the event handler before releasing the vb2 queues */
	v4l2_fh_del(&inst->event_handler);
	v4l2_fh_exit(&inst->event_handler);

	for (i = 0; i < MAX_PORT_NUM; i++)
		vb2_queue_release(&inst->bufq[i].vb2_bufq);

	mutex_destroy(&inst->sync_lock);
	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
	mutex_destroy(&inst->lock);

	msm_vidc_debugfs_deinit_inst(inst);
	pr_info(VIDC_DBG_TAG "Closed video instance: %pK\n",
			VIDC_MSG_PRIO2STRING(VIDC_INFO), inst);
	kfree(inst);
	return 0;
}
1360
/*
 * msm_vidc_close() - close a video session opened via msm_vidc_open().
 * @instance: opaque instance handle (struct msm_vidc_inst *).
 *
 * Frees any still-registered output buffers, releases the session's
 * buffer resources, moves the session to CORE_UNINIT (or forces a
 * cleanup when the core/instance is already in an invalid state), and
 * drops the instance reference; the actual free happens in
 * msm_vidc_destroy() once the last kref is released.
 *
 * Return: 0. (Errors moving to uninit state are logged, not returned.)
 */
int msm_vidc_close(void *instance)
{
	/* Nested function (GCC extension): kref release callback that
	 * performs the final teardown when the last reference drops. */
	void close_helper(struct kref *kref)
	{
		struct msm_vidc_inst *inst = container_of(kref,
				struct msm_vidc_inst, kref);

		msm_vidc_destroy(inst);
	}

	struct msm_vidc_inst *inst = instance;
	struct buffer_info *bi, *dummy;
	int rc = 0;

	if (!inst || !inst->core)
		return -EINVAL;


	/* Unmap and free any OUTPUT-port buffers the client left registered */
	mutex_lock(&inst->registeredbufs.lock);
	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
		if (bi->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			int i = 0;

			list_del(&bi->list);

			for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
					i++) {
				/* Only free planes this driver itself mapped */
				if (bi->handle[i] && bi->mapped[i])
					msm_comm_smem_free(inst, bi->handle[i]);
			}

			kfree(bi);
		}
	}
	mutex_unlock(&inst->registeredbufs.lock);

	cleanup_instance(inst);
	/* A graceful state transition is only possible while both the
	 * instance and the core are still in a valid state; otherwise
	 * force-clean without talking to the firmware. */
	if (inst->state != MSM_VIDC_CORE_INVALID &&
		inst->core->state != VIDC_CORE_INVALID)
		rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
	else
		rc = msm_comm_force_cleanup(inst);
	if (rc)
		dprintk(VIDC_ERR,
			"Failed to move video instance to uninit state\n");

	msm_comm_session_clean(inst);
	msm_smem_delete_client(inst->mem_client);

	/* May invoke close_helper -> msm_vidc_destroy if last reference */
	kref_put(&inst->kref, close_helper);
	return 0;
}
EXPORT_SYMBOL(msm_vidc_close);
1414
/*
 * msm_vidc_suspend() - suspend the video core identified by @core_id.
 *
 * Thin wrapper delegating to the common layer; returns whatever
 * msm_comm_suspend() returns.
 */
int msm_vidc_suspend(int core_id)
{
	return msm_comm_suspend(core_id);
}
EXPORT_SYMBOL(msm_vidc_suspend);
1420