/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "cam_isp_context.h"
#include "cam_isp_log.h"
#include "cam_mem_mgr.h"
#include "cam_sync_api.h"

#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)

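/*
 * Handle a buffer done event while the context is activated. This walks the
 * resource handles reported by the hardware, matches each one against the
 * output fence map of the oldest active request, and signals the matching
 * sync fences. @bubble_state is non-zero when the event arrives during
 * bubble recovery: in that case fences are signaled with an error if no
 * bubble report was requested, or the request is pushed back to the
 * pending list so it can be re-applied.
 */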
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	int i, j;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		CDBG("Buf done with no active request!\n");
		goto end;
	}

	CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);

	req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			pr_err("Can not find matching resource handle 0x%x!\n",
				done->resource_handle[i]);
			rc = -EINVAL;
			continue;
		}

		if (!bubble_state) {
			CDBG("%s: Sync with success: fd 0x%x\n", __func__,
				req_isp->fence_map_out[j].sync_id);
			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS);
			if (rc)
				pr_err("%s: Sync failed with rc = %d\n",
					__func__, rc);

		} else if (!req_isp->bubble_report) {
			CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
				req_isp->fence_map_out[j].sync_id);
			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR);
			if (rc)
				pr_err("%s: Sync failed with rc = %d\n",
					__func__, rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detection is on.
			 * In most cases, the active list should be empty when
			 * a bubble is detected. But for safety, move the
			 * current active request to the pending list here.
			 */
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			continue;
		}

		CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
			req->request_id,
			req_isp->fence_map_out[j].sync_id);
		req_isp->num_acked++;
		req_isp->fence_map_out[j].sync_id = -1;
	}

	if (req_isp->num_acked == req_isp->num_fence_map_out) {
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}

end:
	return rc;
}

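/*
 * Handle a reg update ack in the applied/bubble-applied substates: the
 * first pending request has been committed to hardware, so promote it to
 * the active list (or directly to the free list if it has no output IO
 * config) and advance the substate to EPOCH.
 */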
static int __cam_isp_ctx_reg_upd_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;

	if (list_empty(&ctx->pending_req_list)) {
		pr_err("Reg upd ack with no pending request\n");
		goto end;
	}
	req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		CDBG("%s: move request %lld to active list\n", __func__,
			req->request_id);
		if (!list_empty(&ctx->active_req_list))
			pr_err("%s: More than one entry in active list\n",
				__func__);
		list_add_tail(&req->list, &ctx->active_req_list);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	/*
	 * This function is only called directly from the applied and
	 * bubble-applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);

end:
	return rc;
}

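/*
 * Forward an SOF event to the CRM so it can schedule the next request on
 * this link; only the link/dev handles and the current frame id are sent.
 */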
static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_req_mgr_sof_notify notify;
	struct cam_context *ctx = ctx_isp->base;

	/* notify reqmgr with sof signal */
	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.frame_id = ctx_isp->frame_id;

		ctx->ctx_crm_intf->notify_sof(&notify);
		CDBG("%s: Notify CRM SOF frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		pr_err("%s: Can not notify SOF to CRM\n", __func__);
	}

	return rc;
}


static int __cam_isp_ctx_sof_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	CDBG("%s: Enter\n", __func__);
	ctx_isp->frame_id++;

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED) {
		CDBG("%s: invalid RUP\n", __func__);
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else {
			/* need to handle the buf done */
			list_add_tail(&req->list, &ctx->active_req_list);
			ctx_isp->substate_activated =
				CAM_ISP_CTX_ACTIVATED_EPOCH;
		}
	}
end:
	return rc;
}

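/*
 * An EPOCH while still in the APPLIED substate means the applied request
 * missed its frame: a "bubble". If userspace asked for bubble reporting,
 * notify the CRM so the request can be re-applied; otherwise the request
 * is treated as in flight and moved to the active list.
 */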
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		/*
		 * If there is no pending request at epoch, this is an error
		 * case. The recovery is to go back to the sof state.
		 */
		pr_err("%s: No pending request\n", __func__);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		ctx->ctx_crm_intf->notify_err(&notify);
		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		/*
		 * Since we cannot report the bubble, always move the
		 * request to the active list.
		 */
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		req_isp->bubble_report = 0;
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CDBG("%s: next substate %d\n", __func__,
		ctx_isp->substate_activated);
end:
	return rc;
}


static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}


static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	ctx_isp->frame_id++;
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	CDBG("%s: next substate %d\n", __func__,
		ctx_isp->substate_activated);

	return rc;
}

static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}


static int __cam_isp_ctx_sof_in_bubble(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	ctx_isp->frame_id++;
	return 0;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}

static int __cam_isp_ctx_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->frame_id++;
	return 0;
}


static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	/*
	 * This means we missed the reg upd ack, so we need to
	 * transition to BUBBLE state again.
	 */

	if (list_empty(&ctx->pending_req_list)) {
		/*
		 * If there is no pending request at epoch, this is an error
		 * case. Just go back to the bubble state.
		 */
		pr_err("%s: No pending request.\n", __func__);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		ctx->ctx_crm_intf->notify_err(&notify);
		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		/*
		 * If we can not report the bubble, treat it as if no bubble
		 * report was requested. Just move the req to the active list.
		 */
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		req_isp->bubble_report = 0;
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
end:
	return 0;
}

static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}

static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_req_mgr_error_notify notify;

	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_error_event_data *error_event_data =
		(struct cam_isp_hw_error_event_data *)evt_data;

	uint32_t error_type = error_event_data->error_type;

	CDBG("%s: Enter error_type = %d\n", __func__, error_type);
	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
		notify.error = CRM_KMD_ERR_FATAL;

	/*
	 * Need to check the active requests and move all of them to the
	 * pending request list. Note this function needs revisiting!
	 */

	if (list_empty(&ctx->active_req_list)) {
		pr_err("handling error with no active request!\n");
		rc = -EINVAL;
		goto end;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;

		ctx->ctx_crm_intf->notify_err(&notify);
		pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		pr_err("%s: Can not notify ERROR to CRM\n", __func__);
		rc = -EFAULT;
	}

	list_del_init(&req->list);
	list_add(&req->list, &ctx->pending_req_list);
	/* might need to check if active list is empty */

end:
	CDBG("%s: Exit\n", __func__);
	return rc;
}

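/*
 * Per-substate IRQ dispatch table, used by
 * __cam_isp_ctx_handle_irq_in_activated(). The irq_ops slots are indexed
 * by the hardware event id; the order here is assumed to follow the ISP
 * hardware event enumeration (error, SOF, reg update, epoch, EOF, buf
 * done). A NULL slot means the event is ignored in that substate.
 */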
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_sof,
			__cam_isp_ctx_reg_upd_in_sof,
			__cam_isp_ctx_notify_sof_in_activated_state,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_sof,
			__cam_isp_ctx_reg_upd_in_activated_state,
			__cam_isp_ctx_epoch_in_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			NULL,
			__cam_isp_ctx_notify_sof_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_bubble,
			NULL,
			__cam_isp_ctx_notify_sof_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* BUBBLE APPLIED */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_bubble_applied,
			__cam_isp_ctx_reg_upd_in_activated_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HALT */
	{
	},
};

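/*
 * Common apply path for all activated substates: verify that the request
 * being applied is at the tip of the pending list, reject the apply if the
 * pipeline is congested (more than two requests already outstanding), then
 * program the hardware with the prepared update entries and move to
 * next_state on success.
 */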
static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	uint32_t next_state)
{
	int rc = 0;
	int cnt = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp;
	struct cam_hw_config_args cfg;

	if (list_empty(&ctx->pending_req_list)) {
		pr_err("%s: No available request for Apply id %lld\n",
			__func__, apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * When the pipeline has an issue, requests can queue up in the
	 * pipeline. In this case, we should reject any additional request.
	 * The maximum number of requests allowed to be outstanding is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (++cnt > 2) {
			pr_err("%s: Apply failed due to pipeline congestion\n",
				__func__);
			rc = -EFAULT;
			goto end;
		}
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);

	/*
	 * Check whether the request id matches the tip; if not, this means
	 * we are in the middle of error handling and must reject this apply.
	 */
	if (req->request_id != apply->request_id) {
		rc = -EFAULT;
		goto end;
	}

	CDBG("%s: Apply request %lld\n", __func__, req->request_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	req_isp->bubble_report = apply->report_if_bubble;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		pr_err("%s: Can not apply the configuration\n", __func__);
	} else {
		spin_lock(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		CDBG("%s: new state %d\n", __func__, next_state);
		spin_unlock(&ctx->lock);
	}
end:
	return rc;
}

static int __cam_isp_ctx_apply_req_in_sof(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CDBG("%s: current substate %d\n", __func__,
		ctx_isp->substate_activated);
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);

	return rc;
}

static int __cam_isp_ctx_apply_req_in_epoch(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CDBG("%s: current substate %d\n", __func__,
		ctx_isp->substate_activated);
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);

	return rc;
}

static int __cam_isp_ctx_apply_req_in_bubble(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CDBG("%s: current substate %d\n", __func__,
		ctx_isp->substate_activated);
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);

	return rc;
}

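/*
 * Per-substate CRM ops for the activated state, indexed by the
 * CAM_ISP_CTX_ACTIVATED_* substates. Apply is only valid in SOF, EPOCH and
 * BUBBLE; in the other substates __cam_isp_ctx_apply_req() rejects the
 * request because no apply_req handler is installed.
 */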
static struct cam_ctx_ops
	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};


/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_release_args rel_arg;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	}

	ctx->session_hdl = 0;
	ctx->dev_hdl = 0;
	ctx->link_hdl = 0;
	ctx->ctx_crm_intf = NULL;
	ctx_isp->frame_id = 0;

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add some sanity check code here to help debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		pr_err("%s: Active list is not empty.\n", __func__);

	/* flush the pending list */
	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		pr_err("%s: signal fence in pending list. fence num %d\n",
			__func__, req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		}
		list_add_tail(&req->list, &ctx->free_req_list);
	}
	ctx->state = CAM_CTX_AVAILABLE;
	CDBG("%s: next state %d\n", __func__, ctx->state);
	return rc;
}

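/*
 * Process a config dev packet: take a free request object, map the packet
 * from its memory handle, let the HW manager translate it into hw update
 * entries plus in/out fence maps, then queue the request on the pending
 * list (and register it with the CRM if the context is already activated).
 */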
static int __cam_isp_ctx_config_dev_in_top_state(
	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	uint64_t packet_addr;
	struct cam_packet *packet;
	size_t len = 0;
	struct cam_hw_prepare_update_args cfg;
	struct cam_req_mgr_add_request add_req;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CDBG("%s: get free request object......\n", __func__);

	/* get free request */
	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock(&ctx->lock);

	if (!req) {
		pr_err("%s: No free request object\n", __func__);
		rc = -ENOMEM;
		goto end;
	}

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	/* for config dev, only memory handle is supported */
	/* map packet from the memhandle */
	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
		(uint64_t *) &packet_addr, &len);
	if (rc != 0) {
		pr_err("%s: Can not get packet address\n", __func__);
		rc = -EINVAL;
		goto free_req;
	}

	packet = (struct cam_packet *) (packet_addr + cmd->offset);
	CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
	CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
	CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
		len, cmd->offset);
	CDBG("%s: Packet request id 0x%llx\n", __func__,
		packet->header.request_id);
	CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
	CDBG("%s: packet op %d\n", __func__, packet->header.op_code);

	/* preprocess the configuration */
	memset(&cfg, 0, sizeof(cfg));
	cfg.packet = packet;
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
	cfg.out_map_entries = req_isp->fence_map_out;
	cfg.in_map_entries = req_isp->fence_map_in;

	CDBG("%s: try to prepare config packet......\n", __func__);

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		pr_err("%s: Prepare config packet failed in HW layer\n",
			__func__);
		rc = -EFAULT;
		goto free_req;
	}
	req_isp->num_cfg = cfg.num_hw_update_entries;
	req_isp->num_fence_map_out = cfg.num_out_map_entries;
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;

	CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
		__func__, req_isp->num_cfg, req_isp->num_fence_map_out,
		req_isp->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
		add_req.link_hdl = ctx->link_hdl;
		add_req.dev_hdl = ctx->dev_hdl;
		add_req.req_id = req->request_id;
		rc = ctx->ctx_crm_intf->add_req(&add_req);
		if (rc) {
			pr_err("%s: Error: Adding request id=%llu\n", __func__,
				req->request_id);
			goto free_req;
		}
	}

	CDBG("%s: Packet request id 0x%llx\n", __func__,
		packet->header.request_id);

	spin_lock(&ctx->lock);
	list_add_tail(&req->list, &ctx->pending_req_list);
	spin_unlock(&ctx->lock);

	CDBG("%s: Preprocessing Config %lld successful\n", __func__,
		req->request_id);

	return rc;

free_req:
	spin_lock(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock(&ctx->lock);
end:
	return rc;
}

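/*
 * Acquire flow: copy the ISP resource descriptors from userspace, reserve
 * the hardware through the HW manager, then create a device handle for the
 * CRM bridge. Only user-pointer resource handles (handle_type == 1) are
 * accepted for now.
 */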
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_hw_acquire_args param;
	struct cam_isp_resource *isp_res = NULL;
	struct cam_create_dev_hdl req_hdl_param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx->hw_mgr_intf) {
		pr_err("HW interface is not ready!\n");
		rc = -EFAULT;
		goto end;
	}

	CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
		__func__, cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
		pr_err("Too many resources in the acquire!\n");
		rc = -ENOMEM;
		goto end;
	}

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		pr_err("%s: Only user pointer is supported!", __func__);
		rc = -EINVAL;
		goto end;
	}

	isp_res = kzalloc(
		sizeof(*isp_res) * cmd->num_resources, GFP_KERNEL);
	if (!isp_res) {
		rc = -ENOMEM;
		goto end;
	}

	CDBG("%s: start copy %d resources from user\n",
		__func__, cmd->num_resources);

	if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
		sizeof(*isp_res) * cmd->num_resources)) {
		rc = -EFAULT;
		goto free_res;
	}

	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = cmd->num_resources;
	param.acquire_info = (uint64_t) isp_res;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		pr_err("Acquire device failed\n");
		goto free_res;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;

	req_hdl_param.session_hdl = cmd->session_handle;
	/* bridge is not ready for these flags. so false for now */
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;

	CDBG("%s: get device handle from bridge\n", __func__);
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		pr_err("Can not create device handle\n");
		goto free_hw;
	}
	cmd->dev_handle = ctx->dev_hdl;

	/* store session information */
	ctx->session_hdl = cmd->session_handle;

	ctx->state = CAM_CTX_ACQUIRED;

	CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
	kfree(isp_res);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
free_res:
	kfree(isp_res);
end:
	return rc;
}

static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);

	if (!rc && ctx->link_hdl)
		ctx->state = CAM_CTX_READY;

	CDBG("%s: next state %d\n", __func__, ctx->state);
	return rc;
}

static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	int rc = 0;

	CDBG("%s:%d: Enter.........\n", __func__, __LINE__);

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;

	/* change state only if we had the init config */
	if (!list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_READY;

	CDBG("%s: next state %d\n", __func__, ctx->state);

	return rc;
}

static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	ctx->link_hdl = 0;
	ctx->ctx_crm_intf = NULL;

	return rc;
}

static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	int rc = 0;

	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
	dev_info->p_delay = 1;

	return rc;
}

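/*
 * Start flow: the initial configuration must already be on the pending
 * list, since its hw update entries are passed to the HW manager as part
 * of the start arguments.
 */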
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_hw_start_args arg;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		pr_err("%s: Start device with empty configuration\n",
			__func__);
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (!ctx_isp->hw_ctx) {
		pr_err("%s:%d: Wrong hw context pointer.\n",
			__func__, __LINE__);
		rc = -EFAULT;
		goto end;
	}
	arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	arg.hw_update_entries = req_isp->cfg;
	arg.num_hw_update_entries = req_isp->num_cfg;

	ctx_isp->frame_id = 0;
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	/*
	 * This is the only place the state changes before calling into the
	 * hw: the hardware tasklet runs at a higher priority, so irq
	 * handling can start before hw_start() returns.
	 */
	ctx->state = CAM_CTX_ACTIVATED;
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
	if (rc) {
		/* HW failure. user need to clean up the resource */
		pr_err("Start HW failed\n");
		ctx->state = CAM_CTX_READY;
		goto end;
	}
	CDBG("%s: start device success\n", __func__);
end:
	return rc;
}

static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	ctx->link_hdl = 0;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;

	return rc;
}

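/*
 * Common stop path: move the substate to HALT first so that late hardware
 * events are ignored, stop the hardware, then fail the fences still
 * attached to pending or active requests and recycle those requests onto
 * the free list.
 */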
static int __cam_isp_ctx_stop_dev_in_activated_unlock(
	struct cam_context *ctx)
{
	int rc = 0;
	uint32_t i;
	struct cam_hw_stop_args stop;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	/* Mask off all the incoming hardware events */
	spin_lock(&ctx->lock);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
	spin_unlock(&ctx->lock);
	CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);

	/* stop hw first */
	if (ctx_isp->hw_ctx) {
		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop);
	}

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CDBG("%s: signal fence in pending list. fence num %d\n",
			__func__, req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CDBG("%s: signal fence in active list. fence num %d\n",
			__func__, req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}
	ctx_isp->frame_id = 0;

	CDBG("%s: next state %d", __func__, ctx->state);
	return rc;
}

static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;

	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
	ctx->state = CAM_CTX_ACQUIRED;
	return rc;
}

static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);

	if (ctx_isp->hw_ctx) {
		struct cam_hw_release_args arg;

		arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&arg);
		ctx_isp->hw_ctx = NULL;
	}

	ctx->session_hdl = 0;
	ctx->dev_hdl = 0;
	ctx->link_hdl = 0;
	ctx->ctx_crm_intf = NULL;

	ctx->state = CAM_CTX_AVAILABLE;

	return rc;
}

static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CDBG("%s: Enter: apply req in Substate %d\n",
		__func__, ctx_isp->substate_activated);
	if (ctx_isp->substate_machine[ctx_isp->substate_activated].
		crm_ops.apply_req) {
		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
			crm_ops.apply_req(ctx, apply);
	} else {
		pr_err("%s: No handle function in activated substate %d\n",
			__func__, ctx_isp->substate_activated);
		rc = -EFAULT;
	}

	if (rc)
		pr_err("%s: Apply failed in active substate %d\n",
			__func__, ctx_isp->substate_activated);
	return rc;
}

static int __cam_isp_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = (struct cam_context *)context;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	spin_lock(&ctx->lock);
	CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
		__func__, ctx->state, ctx_isp->substate_activated, evt_id);
	if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
		irq_ops[evt_id]) {
		rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
			irq_ops[evt_id](ctx_isp, evt_data);
	} else {
		CDBG("%s: No handle function for substate %d\n", __func__,
			ctx_isp->substate_activated);
	}
	CDBG("%s: Exit: State %d Substate %d\n",
		__func__, ctx->state, ctx_isp->substate_activated);
	spin_unlock(&ctx->lock);
	return rc;
}

/* top state machine */
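/*
 * Indexed by the CAM_CTX_* context state; each entry lists the ioctl, CRM
 * and IRQ entry points that are valid in that state. The slot order is
 * assumed to match the cam_context state enumeration (uninit, available,
 * acquired, ready, activated).
 */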
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
		},
		.irq_ops = NULL,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
		},
		.irq_ops = NULL,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
		},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req,
		},
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
	},
};

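/*
 * Wire an ISP context to its base camera context: cross-link the per-slot
 * request structures, install the activated-state machines defined above,
 * and hook the top state machine into the base context.
 */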
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf)
{
	int rc = -1;
	int i;

	if (!ctx || !ctx_base) {
		pr_err("%s: Invalid Context\n", __func__);
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;

	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
		CAM_CTX_REQ_MAX);
	if (rc) {
		pr_err("%s: Camera Context Base init failed\n", __func__);
		goto err;
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

err:
	return rc;
}

int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	int rc = 0;

	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		pr_err("%s: ISP context substate is invalid\n", __func__);

	memset(ctx, 0, sizeof(*ctx));
	return rc;
}