blob: 766ea8974a887f856ec661266e8fcdade6744e45 [file] [log] [blame]
Jing Zhoud352ed12017-03-20 23:59:56 -07001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/videodev2.h>
Jing Zhoud352ed12017-03-20 23:59:56 -070018
19#include "cam_isp_context.h"
20#include "cam_isp_log.h"
21#include "cam_mem_mgr.h"
22#include "cam_sync_api.h"
Jing Zhoub524a852017-05-16 15:47:30 +053023#include "cam_req_mgr_dev.h"
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -060024#include "cam_trace.h"
Jing Zhoud352ed12017-03-20 23:59:56 -070025
/* Local debug logging: route CDBG through the kernel's pr_debug so it is
 * controlled by the dynamic-debug facility (off unless enabled per-site).
 */
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
28
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -060029static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
30{
31 uint64_t ts = 0;
32
33 if (!evt_data)
34 return 0;
35
36 switch (evt_id) {
37 case CAM_ISP_HW_EVENT_ERROR:
38 ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
39 timestamp;
40 break;
41 case CAM_ISP_HW_EVENT_SOF:
42 ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
43 timestamp;
44 break;
45 case CAM_ISP_HW_EVENT_REG_UPDATE:
46 ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
47 timestamp;
48 break;
49 case CAM_ISP_HW_EVENT_EPOCH:
50 ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
51 timestamp;
52 break;
53 case CAM_ISP_HW_EVENT_EOF:
54 ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
55 timestamp;
56 break;
57 case CAM_ISP_HW_EVENT_DONE:
58 break;
59 default:
60 CDBG("%s: Invalid Event Type %d\n", __func__, evt_id);
61 }
62
63 return ts;
64}
65
/*
 * Handle a buf-done event while the context is activated.
 *
 * Matches each completed resource handle in @done against the out-fence
 * map of the oldest active request and signals the corresponding sync
 * object: SUCCESS when @bubble_state is 0, ERROR when a bubble was
 * detected but no bubble report was requested. If a bubble report is
 * pending, the buf done is ignored and the request is moved back to the
 * pending list instead. Once every out-fence of the request is acked the
 * request is recycled onto the free list.
 *
 * Returns 0 on success, -EINVAL when a resource handle has no matching
 * fence map entry.
 */
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	int i, j;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		CDBG("Buf done with no active request!\n");
		goto end;
	}

	CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);

	/* Buf done is always attributed to the oldest active request */
	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	for (i = 0; i < done->num_handles; i++) {
		/* Find the fence map entry for this resource handle */
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/* NOTE(review): "lane handle" in this message most
			 * likely means "resource handle".
			 */
			pr_err("Can not find matching lane handle 0x%x!\n",
				done->resource_handle[i]);
			rc = -EINVAL;
			continue;
		}

		if (!bubble_state) {
			CDBG("%s: Sync with success: fd 0x%x\n", __func__,
				req_isp->fence_map_out[j].sync_id);
			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS);
			if (rc)
				pr_err("%s: Sync failed with rc = %d\n",
					__func__, rc);

		} else if (!req_isp->bubble_report) {
			/* Bubble detected but caller did not ask for a
			 * bubble report: fail the fence directly.
			 */
			CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
				req_isp->fence_map_out[j].sync_id);
			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR);
			if (rc)
				pr_err("%s: Sync failed with rc = %d\n",
					__func__, rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
			 * In most case, active list should be empty when
			 * bubble detects. But for safety, we just move the
			 * current active request to the pending list here.
			 */
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			continue;
		}

		/* Mark this fence as consumed so it is not signaled twice */
		CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
			req->request_id,
			req_isp->fence_map_out[j].sync_id);
		req_isp->num_acked++;
		req_isp->fence_map_out[j].sync_id = -1;
	}

	/* All out-fences acked: retire the request to the free list */
	if (req_isp->num_acked == req_isp->num_fence_map_out) {
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		ctx_isp->active_req_cnt--;
		CDBG("%s: Move active request %lld to free list(cnt = %d)\n",
			__func__, req->request_id, ctx_isp->active_req_cnt);
	}

end:
	return rc;
}
151
Jing Zhoudedc4762017-06-19 17:45:36 +0530152static void __cam_isp_ctx_send_sof_timestamp(
153 struct cam_isp_context *ctx_isp, uint64_t request_id,
154 uint32_t sof_event_status)
155{
156 struct cam_req_mgr_message req_msg;
157
158 req_msg.session_hdl = ctx_isp->base->session_hdl;
159 req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
160 req_msg.u.frame_msg.request_id = request_id;
161 req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
162 req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
163 req_msg.u.frame_msg.sof_status = sof_event_status;
164
165 CDBG("%s: request id:%lld frame number:%lld SOF time stamp:0x%llx\n",
166 __func__, request_id, ctx_isp->frame_id,
167 ctx_isp->sof_timestamp_val);
168 CDBG("%s sof status:%d\n", __func__, sof_event_status);
169
170 if (cam_req_mgr_notify_frame_message(&req_msg,
171 V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
172 pr_err("%s: Error in notifying the sof time for req id:%lld\n",
173 __func__, request_id);
174}
175
/*
 * Handle a reg-update ack in APPLIED / BUBBLE_APPLIED substate.
 *
 * The oldest pending request has now been latched by the hardware: move
 * it to the active list if it has out-fences to wait for, or retire it
 * straight to the free list when it carried no IO config. In either case
 * the substate advances to EPOCH.
 *
 * Returns 0 (also when there was no pending request; that case is only
 * logged).
 */
static int __cam_isp_ctx_reg_upd_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;

	if (list_empty(&ctx->pending_req_list)) {
		pr_err("Reg upd ack with no pending request\n");
		goto end;
	}
	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		/* Out-fences outstanding: wait for buf done on active list */
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CDBG("%s: move request %lld to active list(cnt = %d)\n",
			__func__, req->request_id, ctx_isp->active_req_cnt);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CDBG("%s: move active request %lld to free list(cnt = %d)\n",
			__func__, req->request_id, ctx_isp->active_req_cnt);
	}

	/*
	 * This function only called directly from applied and bubble applied
	 * state so change substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);

end:
	return rc;
}
215
216static int __cam_isp_ctx_notify_sof_in_actived_state(
217 struct cam_isp_context *ctx_isp, void *evt_data)
218{
Jing Zhoud352ed12017-03-20 23:59:56 -0700219 struct cam_req_mgr_sof_notify notify;
220 struct cam_context *ctx = ctx_isp->base;
Jing Zhoudedc4762017-06-19 17:45:36 +0530221 struct cam_ctx_request *req;
222 uint64_t request_id = 0;
Jing Zhoud352ed12017-03-20 23:59:56 -0700223
Jing Zhoudedc4762017-06-19 17:45:36 +0530224 /*
225 * notify reqmgr with sof signal. Note, due to scheduling delay
226 * we can run into situation that two active requests has already
227 * be in the active queue while we try to do the notification.
228 * In this case, we need to skip the current notification. This
229 * helps the state machine to catch up the delay.
230 */
231 if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
232 ctx_isp->active_req_cnt <= 2) {
Jing Zhoud352ed12017-03-20 23:59:56 -0700233 notify.link_hdl = ctx->link_hdl;
234 notify.dev_hdl = ctx->dev_hdl;
235 notify.frame_id = ctx_isp->frame_id;
236
237 ctx->ctx_crm_intf->notify_sof(&notify);
238 CDBG("%s: Notify CRM SOF frame %lld\n", __func__,
239 ctx_isp->frame_id);
Jing Zhoudedc4762017-06-19 17:45:36 +0530240
241 list_for_each_entry(req, &ctx->active_req_list, list) {
242 if (req->request_id > ctx_isp->reported_req_id) {
243 request_id = req->request_id;
244 ctx_isp->reported_req_id = request_id;
245 break;
246 }
247 }
248
249 __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
250 CAM_REQ_MGR_SOF_EVENT_SUCCESS);
Jing Zhoud352ed12017-03-20 23:59:56 -0700251 } else {
252 pr_err("%s: Can not notify SOF to CRM\n", __func__);
253 }
254
Jing Zhoudedc4762017-06-19 17:45:36 +0530255 return 0;
Jing Zhoud352ed12017-03-20 23:59:56 -0700256}
257
258
Jing Zhoudedc4762017-06-19 17:45:36 +0530259static int __cam_isp_ctx_sof_in_activated_state(
260 struct cam_isp_context *ctx_isp, void *evt_data)
Jing Zhoud352ed12017-03-20 23:59:56 -0700261{
262 int rc = 0;
Jing Zhoudedc4762017-06-19 17:45:36 +0530263 struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
Jing Zhoud352ed12017-03-20 23:59:56 -0700264
Jing Zhoudedc4762017-06-19 17:45:36 +0530265 if (!evt_data) {
266 pr_err("%s: in valid sof event data\n", __func__);
267 return -EINVAL;
268 }
269
Jing Zhoud352ed12017-03-20 23:59:56 -0700270 ctx_isp->frame_id++;
Jing Zhoudedc4762017-06-19 17:45:36 +0530271 ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
272 CDBG("%s: frame id: %lld time stamp:0x%llx\n", __func__,
273 ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
Jing Zhoud352ed12017-03-20 23:59:56 -0700274
275 return rc;
276}
277
/*
 * Handle a reg-update ack while in the SOF substate.
 *
 * This covers the very first update after streaming starts: the initial
 * configuration causes a reg_upd in the first frame. The oldest pending
 * request is retired to the free list when all of its fences are already
 * acked, otherwise it moves to the active list to wait for buf done and
 * the substate advances to EPOCH. A reg-update outside CAM_CTX_ACTIVATED
 * is ignored.
 */
static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED) {
		CDBG("%s: invalid RUP\n", __func__);
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked) {
			/* Every out-fence already acked: recycle the req */
			list_add_tail(&req->list, &ctx->free_req_list);
		} else {
			/* need to handle the buf done */
			list_add_tail(&req->list, &ctx->active_req_list);
			ctx_isp->active_req_cnt++;
			CDBG("%s: move request %lld to active list(cnt = %d)\n",
				__func__, req->request_id,
				ctx_isp->active_req_cnt);
			ctx_isp->substate_activated =
				CAM_ISP_CTX_ACTIVATED_EPOCH;
		}
	}
end:
	return rc;
}
316
/*
 * Handle EPOCH while in the APPLIED substate: reaching epoch with the
 * request still pending means the hardware missed the applied settings
 * (a "bubble").
 *
 * If the request asked for bubble reporting and a CRM error callback is
 * available, the CRM is notified so it can re-apply; otherwise the
 * request is force-moved to the active list and its bubble flag cleared.
 * Either way the SOF timestamp event is sent with error status and the
 * substate becomes BUBBLE. With no pending request at all the context
 * recovers back to SOF and an empty-frame SOF event is sent instead.
 */
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (list_empty(&ctx->pending_req_list)) {
		/*
		 * If no pending req in epoch, this is an error case.
		 * The recovery is to go back to sof state
		 */
		pr_err("%s: No pending request\n", __func__);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame (request_id stays 0) */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		ctx->ctx_crm_intf->notify_err(&notify);
		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		/*
		 * Since can not bubble report, always move the request to
		 * active list.
		 */
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CDBG("%s: move request %lld to active list(cnt = %d)\n",
			__func__, req->request_id, ctx_isp->active_req_cnt);
		req_isp->bubble_report = 0;
	}

	/* SOF timestamp goes out with error status for the bubbled req */
	request_id = req->request_id;
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_ERROR);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CDBG("%s: next substate %d\n", __func__,
		ctx_isp->substate_activated);
end:
	return 0;
}
379
380
/*
 * Buf done in APPLIED substate: no bubble in progress, so delegate to
 * the common handler with bubble_state = 0 (fences signal success).
 */
static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	return __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp,
		(struct cam_isp_hw_done_event_data *) evt_data, 0);
}
391
392
393static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
394 void *evt_data)
395{
396 int rc = 0;
Jing Zhoudedc4762017-06-19 17:45:36 +0530397 struct cam_context *ctx = ctx_isp->base;
398 struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
Jing Zhou93b3ec12017-06-15 17:43:39 -0700399
Jing Zhoudedc4762017-06-19 17:45:36 +0530400 if (!evt_data) {
401 pr_err("%s: in valid sof event data\n", __func__);
402 return -EINVAL;
403 }
Jing Zhoud352ed12017-03-20 23:59:56 -0700404
405 ctx_isp->frame_id++;
Jing Zhoudedc4762017-06-19 17:45:36 +0530406 ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
407
Jing Zhou93b3ec12017-06-15 17:43:39 -0700408 if (list_empty(&ctx->active_req_list))
409 ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
410 else
411 CDBG("%s: Still need to wait for the buf done\n", __func__);
Jing Zhoudedc4762017-06-19 17:45:36 +0530412
Jing Zhoubb536a82017-05-18 15:20:38 -0700413 CDBG("%s: next substate %d\n", __func__,
Jing Zhoud352ed12017-03-20 23:59:56 -0700414 ctx_isp->substate_activated);
415
416 return rc;
417}
418
/*
 * Buf done in EPOCH substate: normal completion path, delegate to the
 * common handler with bubble_state = 0 (fences signal success).
 */
static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	return __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp,
		(struct cam_isp_hw_done_event_data *) evt_data, 0);
}
429
/*
 * Buf done in BUBBLE substate: a bubble is in progress, so delegate to
 * the common handler with bubble_state = 1 (fences fail or the done is
 * deferred, depending on the request's bubble_report flag).
 */
static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	return __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp,
		(struct cam_isp_hw_done_event_data *) evt_data, 1);
}
440
/*
 * Handle EPOCH while in BUBBLE_APPLIED substate. Getting here means the
 * reg-upd ack for the re-applied request was missed, so the bubble is
 * reported again (or the request force-moved to the active list when
 * reporting is not possible) and the context transitions back to
 * BUBBLE. The SOF timestamp event is sent with error status for the
 * affected request, or as an empty frame when nothing is pending.
 */
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	/*
	 * This means we missed the reg upd ack. So we need to
	 * transition to BUBBLE state again.
	 */

	if (list_empty(&ctx->pending_req_list)) {
		/*
		 * If no pending req in epoch, this is an error case.
		 * Just go back to the bubble state.
		 */
		pr_err("%s: No pending request.\n", __func__);
		/* Empty frame: request_id stays 0 */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		ctx->ctx_crm_intf->notify_err(&notify);
		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		/*
		 * If we can not report bubble, then treat it as if no bubble
		 * report. Just move the req to active list.
		 */
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CDBG("%s: move request %lld to active list(cnt = %d)\n",
			__func__, req->request_id, ctx_isp->active_req_cnt);
		req_isp->bubble_report = 0;
	}

	/* SOF timestamp goes out with error status for the bubbled req */
	request_id = req->request_id;
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_ERROR);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
end:
	return 0;
}
504
/*
 * Buf done in BUBBLE_APPLIED substate: still inside bubble handling, so
 * delegate to the common handler with bubble_state = 1.
 */
static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	return __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp,
		(struct cam_isp_hw_done_event_data *) evt_data, 1);
}
515
516static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
517 void *evt_data)
518{
519 int rc = 0;
520 struct cam_ctx_request *req;
521 struct cam_req_mgr_error_notify notify;
522
523 struct cam_context *ctx = ctx_isp->base;
524 struct cam_isp_hw_error_event_data *error_event_data =
525 (struct cam_isp_hw_error_event_data *)evt_data;
526
527 uint32_t error_type = error_event_data->error_type;
528
529 CDBG("%s: Enter error_type = %d\n", __func__, error_type);
530 if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
531 (error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
532 notify.error = CRM_KMD_ERR_FATAL;
533
534 /*
535 * Need to check the active req
536 * move all of them to the pending request list
537 * Note this funciton need revisit!
538 */
539
540 if (list_empty(&ctx->active_req_list)) {
541 pr_err("handling error with no active request!\n");
542 rc = -EINVAL;
543 goto end;
544 }
545
546 req = list_first_entry(&ctx->active_req_list,
547 struct cam_ctx_request, list);
548
549 if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
550 notify.link_hdl = ctx->link_hdl;
551 notify.dev_hdl = ctx->dev_hdl;
552 notify.req_id = req->request_id;
553
554 ctx->ctx_crm_intf->notify_err(&notify);
555 pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
556 ctx_isp->frame_id);
557 } else {
558 pr_err("%s: Can not notify ERRROR to CRM\n", __func__);
559 rc = -EFAULT;
560 }
561
562 list_del_init(&req->list);
563 list_add(&req->list, &ctx->pending_req_list);
564 /* might need to check if active list is empty */
565
566end:
567 CDBG("%s: Exit\n", __func__);
568 return rc;
569}
570
/*
 * Per-substate IRQ dispatch table for the activated state machine,
 * indexed by CAM_ISP_CTX_ACTIVATED_* substate. The irq_ops slots appear
 * to follow the CAM_ISP_HW_EVENT_* order used by the handlers above
 * (error, SOF, reg-update, epoch, EOF, buf-done) -- confirm against the
 * enum definition. A NULL slot means the event is ignored in that
 * substate; HALT ignores everything.
 */
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_sof,
			__cam_isp_ctx_notify_sof_in_actived_state,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_activated_state,
			__cam_isp_ctx_epoch_in_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			NULL,
			__cam_isp_ctx_notify_sof_in_actived_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			NULL,
			__cam_isp_ctx_notify_sof_in_actived_state,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_activated_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HALT */
	{
	},
};
632
/*
 * Common apply path for the activated substates: validate the apply
 * request against the head of the pending list, push the request's HW
 * update entries to the hw manager, and on success advance the substate
 * to @next_state.
 *
 * Returns 0 on success; -EFAULT when there is no pending request, when
 * more than one request is already outstanding (congestion throttle),
 * or when the apply id does not match the pending head (mid error
 * recovery); otherwise the hw_config() error code.
 */
static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	uint32_t next_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp;
	struct cam_hw_config_args cfg;

	if (list_empty(&ctx->pending_req_list)) {
		pr_err("%s: No available request for Apply id %lld\n",
			__func__, apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * When the pipeline has issue, the requests can be queued up in the
	 * pipeline. In this case, we should reject the additional request.
	 * The maximum number of request allowed to be outstanding is 2.
	 */
	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	if (ctx_isp->active_req_cnt >= 2) {
		CDBG("%s: Reject apply request due to congestion(cnt = %d)\n",
			__func__, ctx_isp->active_req_cnt);
		rc = -EFAULT;
		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);

	/*
	 * Check whether the request id is matching the tip, if not, this
	 * means we are in the middle of the error handling. Need to reject
	 * this apply.
	 */
	if (req->request_id != apply->request_id) {
		rc = -EFAULT;
		goto end;
	}

	CDBG("%s: Apply request %lld\n", __func__, req->request_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	req_isp->bubble_report = apply->report_if_bubble;

	/* NOTE(review): only these three cfg fields are initialized --
	 * verify cam_hw_config_args has no other fields hw_config() reads.
	 */
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		pr_err("%s: Can not apply the configuration\n", __func__);
	} else {
		/* Substate is read from IRQ context; update under the lock */
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		CDBG("%s: new state %d\n", __func__, next_state);
		spin_unlock_bh(&ctx->lock);
	}
end:
	return rc;
}
697
698static int __cam_isp_ctx_apply_req_in_sof(
699 struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
700{
701 int rc = 0;
702 struct cam_isp_context *ctx_isp =
703 (struct cam_isp_context *) ctx->ctx_priv;
704
705 CDBG("%s: current substate %d\n", __func__,
706 ctx_isp->substate_activated);
707 rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
708 CAM_ISP_CTX_ACTIVATED_APPLIED);
709 CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
710
711 return rc;
712}
713
714static int __cam_isp_ctx_apply_req_in_epoch(
715 struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
716{
717 int rc = 0;
718 struct cam_isp_context *ctx_isp =
719 (struct cam_isp_context *) ctx->ctx_priv;
720
721 CDBG("%s: current substate %d\n", __func__,
722 ctx_isp->substate_activated);
723 rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
724 CAM_ISP_CTX_ACTIVATED_APPLIED);
725 CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
726
727 return rc;
728}
729
730static int __cam_isp_ctx_apply_req_in_bubble(
731 struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
732{
733 int rc = 0;
734 struct cam_isp_context *ctx_isp =
735 (struct cam_isp_context *) ctx->ctx_priv;
736
737 CDBG("%s: current substate %d\n", __func__,
738 ctx_isp->substate_activated);
739 rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
740 CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
741 CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
742
743 return rc;
744}
745
/*
 * Flush requests from @req_list: for FLUSH_TYPE_ALL every request is
 * flushed; for FLUSH_TYPE_CANCEL_REQ only the request matching
 * flush_req->req_id. Each flushed request has its outstanding out-fences
 * signaled with error and is recycled onto the free list.
 *
 * Runs under ctx->lock for the whole list walk.
 * NOTE(review): cam_sync_signal() is called while holding the spinlock
 * -- confirm it can never sleep in this configuration.
 *
 * Always returns 0.
 */
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp;

	spin_lock(&ctx->lock);
	if (list_empty(req_list)) {
		spin_unlock(&ctx->lock);
		CDBG("%s: request list is empty\n", __func__);
		return 0;
	}

	list_for_each_entry_safe(req, req_temp, req_list, list) {
		/* Cancel mode: skip everything but the targeted request */
		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
			&& (req->request_id != flush_req->req_id))
			continue;

		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		/* Fail every fence that has not been signaled yet */
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1) {
				CDBG("%s: Flush req 0x%llx, fence %d\n",
					__func__, req->request_id,
					req_isp->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc)
					pr_err_ratelimited("%s: signal fence failed\n",
						__func__);
				req_isp->fence_map_out[i].sync_id = -1;
			}
		}
		list_add_tail(&req->list, &ctx->free_req_list);

		/* If flush request id found, exit the loop */
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			cancel_req_id_found = 1;
			break;
		}
	}
	spin_unlock(&ctx->lock);

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
		!cancel_req_id_found)
		CDBG("%s:Flush request id:%lld is not found in the list\n",
			__func__, flush_req->req_id);

	return 0;
}
800
801static int __cam_isp_ctx_flush_req_in_top_state(
802 struct cam_context *ctx,
803 struct cam_req_mgr_flush_request *flush_req)
804{
805 int rc = 0;
806
807 CDBG("%s: try to flush pending list\n", __func__);
808 rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
809 CDBG("%s: Flush request in top state %d\n",
810 __func__, ctx->state);
811 return rc;
812}
813
814static int __cam_isp_ctx_flush_req_in_ready(
815 struct cam_context *ctx,
816 struct cam_req_mgr_flush_request *flush_req)
817{
818 int rc = 0;
819
820 CDBG("%s: try to flush pending list\n", __func__);
821 rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
822
823 /* if nothing is in pending req list, change state to acquire*/
824 spin_lock(&ctx->lock);
825 if (list_empty(&ctx->pending_req_list))
826 ctx->state = CAM_CTX_ACQUIRED;
827 spin_unlock(&ctx->lock);
828
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -0600829 trace_cam_context_state("ISP", ctx);
830
Jing Zhoub524a852017-05-16 15:47:30 +0530831 CDBG("%s: Flush request in ready state. next state %d\n",
832 __func__, ctx->state);
833 return rc;
834}
835
/*
 * Per-substate CRM/ioctl dispatch table for the activated state,
 * indexed by CAM_ISP_CTX_ACTIVATED_* substate. apply_req is accepted
 * only in SOF, EPOCH and BUBBLE; APPLIED, Bubble Applied and HALT take
 * no CRM operations. IRQ dispatch is handled by the separate
 * cam_isp_ctx_activated_state_machine_irq table.
 */
static struct cam_ctx_ops
	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
881
882
883/* top level state machine */
/*
 * Release the device from any top-level state: release the HW context,
 * reset all handles and counters, flush every pending request, and
 * return the context to CAM_CTX_AVAILABLE.
 *
 * Returns the result of the pending-list flush (currently always 0).
 */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	/* Hand the HW resources back to the hw manager first */
	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	}

	ctx->session_hdl = 0;
	ctx->dev_hdl = 0;
	ctx->link_hdl = 0;
	ctx->ctx_crm_intf = NULL;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add some sanity check code here to help the debug
	 */
	if (!list_empty(&ctx->active_req_list))
		pr_err("%s: Active list is not empty\n", __func__);

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	/* NOTE(review): link_hdl/dev_hdl were zeroed just above, so these
	 * assignments copy 0 -- harmless for a TYPE_ALL flush (the handles
	 * are not consulted by __cam_isp_ctx_flush_req), but confirm.
	 */
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;

	CDBG("%s: try to flush pending list\n", __func__);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);

	ctx->state = CAM_CTX_AVAILABLE;

	trace_cam_context_state("ISP", ctx);
	CDBG("%s: next state %d\n", __func__, ctx->state);
	return rc;
}
929
930static int __cam_isp_ctx_config_dev_in_top_state(
931 struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
932{
933 int rc = 0;
934 struct cam_ctx_request *req = NULL;
935 struct cam_isp_ctx_req *req_isp;
936 uint64_t packet_addr;
937 struct cam_packet *packet;
938 size_t len = 0;
939 struct cam_hw_prepare_update_args cfg;
940 struct cam_req_mgr_add_request add_req;
941 struct cam_isp_context *ctx_isp =
942 (struct cam_isp_context *) ctx->ctx_priv;
943
944 CDBG("%s: get free request object......\n", __func__);
945
946 /* get free request */
Jing Zhou93b3ec12017-06-15 17:43:39 -0700947 spin_lock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -0700948 if (!list_empty(&ctx->free_req_list)) {
949 req = list_first_entry(&ctx->free_req_list,
950 struct cam_ctx_request, list);
951 list_del_init(&req->list);
952 }
Jing Zhou93b3ec12017-06-15 17:43:39 -0700953 spin_unlock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -0700954
955 if (!req) {
956 pr_err("%s: No more request obj free\n", __func__);
957 rc = -ENOMEM;
958 goto end;
959 }
960
961 req_isp = (struct cam_isp_ctx_req *) req->req_priv;
962
963 /* for config dev, only memory handle is supported */
964 /* map packet from the memhandle */
965 rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
966 (uint64_t *) &packet_addr, &len);
967 if (rc != 0) {
968 pr_err("%s: Can not get packet address\n", __func__);
969 rc = -EINVAL;
970 goto free_req;
971 }
972
973 packet = (struct cam_packet *) (packet_addr + cmd->offset);
974 CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
975 CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
976 CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
977 len, cmd->offset);
Jing Zhoudedc4762017-06-19 17:45:36 +0530978 CDBG("%s: Packet request id %lld\n", __func__,
Jing Zhoud352ed12017-03-20 23:59:56 -0700979 packet->header.request_id);
980 CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
981 CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
982
983 /* preprocess the configuration */
984 memset(&cfg, 0, sizeof(cfg));
985 cfg.packet = packet;
986 cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
987 cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
988 cfg.hw_update_entries = req_isp->cfg;
989 cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
990 cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
991 cfg.out_map_entries = req_isp->fence_map_out;
992 cfg.in_map_entries = req_isp->fence_map_in;
993
994 CDBG("%s: try to prepare config packet......\n", __func__);
995
996 rc = ctx->hw_mgr_intf->hw_prepare_update(
997 ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
998 if (rc != 0) {
999 pr_err("%s: Prepare config packet failed in HW layer\n",
1000 __func__);
1001 rc = -EFAULT;
1002 goto free_req;
1003 }
1004 req_isp->num_cfg = cfg.num_hw_update_entries;
1005 req_isp->num_fence_map_out = cfg.num_out_map_entries;
1006 req_isp->num_fence_map_in = cfg.num_in_map_entries;
1007 req_isp->num_acked = 0;
1008
1009 CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
1010 __func__, req_isp->num_cfg, req_isp->num_fence_map_out,
1011 req_isp->num_fence_map_in);
1012
1013 req->request_id = packet->header.request_id;
1014 req->status = 1;
1015
1016 if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
1017 add_req.link_hdl = ctx->link_hdl;
1018 add_req.dev_hdl = ctx->dev_hdl;
1019 add_req.req_id = req->request_id;
1020 rc = ctx->ctx_crm_intf->add_req(&add_req);
1021 if (rc) {
1022 pr_err("%s: Error: Adding request id=%llu\n", __func__,
1023 req->request_id);
1024 goto free_req;
1025 }
1026 }
1027
1028 CDBG("%s: Packet request id 0x%llx\n", __func__,
1029 packet->header.request_id);
1030
Jing Zhou93b3ec12017-06-15 17:43:39 -07001031 spin_lock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001032 list_add_tail(&req->list, &ctx->pending_req_list);
Jing Zhou93b3ec12017-06-15 17:43:39 -07001033 spin_unlock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001034
1035 CDBG("%s: Preprocessing Config %lld successful\n", __func__,
1036 req->request_id);
1037
1038 return rc;
1039
1040free_req:
Jing Zhou93b3ec12017-06-15 17:43:39 -07001041 spin_lock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001042 list_add_tail(&req->list, &ctx->free_req_list);
Jing Zhou93b3ec12017-06-15 17:43:39 -07001043 spin_unlock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001044end:
1045 return rc;
1046}
1047
1048static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
1049 struct cam_acquire_dev_cmd *cmd)
1050{
1051 int rc = 0;
1052 struct cam_hw_acquire_args param;
1053 struct cam_isp_resource *isp_res = NULL;
1054 struct cam_create_dev_hdl req_hdl_param;
1055 struct cam_hw_release_args release;
1056 struct cam_isp_context *ctx_isp =
1057 (struct cam_isp_context *) ctx->ctx_priv;
1058
1059 if (!ctx->hw_mgr_intf) {
1060 pr_err("HW interface is not ready!\n");
1061 rc = -EFAULT;
1062 goto end;
1063 }
1064
1065 CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
1066 __func__, cmd->session_handle, cmd->num_resources,
1067 cmd->handle_type, cmd->resource_hdl);
1068
1069 if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
1070 pr_err("Too much resources in the acquire!\n");
1071 rc = -ENOMEM;
1072 goto end;
1073 }
1074
1075 /* for now we only support user pointer */
1076 if (cmd->handle_type != 1) {
1077 pr_err("%s: Only user pointer is supported!", __func__);
1078 rc = -EINVAL;
1079 goto end;
1080 }
1081
1082 isp_res = kzalloc(
1083 sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
1084 if (!isp_res) {
1085 rc = -ENOMEM;
1086 goto end;
1087 }
1088
1089 CDBG("%s: start copy %d resources from user\n",
1090 __func__, cmd->num_resources);
1091
1092 if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
1093 sizeof(*isp_res)*cmd->num_resources)) {
1094 rc = -EFAULT;
1095 goto free_res;
1096 }
1097
1098 param.context_data = ctx;
1099 param.event_cb = ctx->irq_cb_intf;
1100 param.num_acq = cmd->num_resources;
1101 param.acquire_info = (uint64_t) isp_res;
1102
1103 /* call HW manager to reserve the resource */
1104 rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
1105 &param);
1106 if (rc != 0) {
1107 pr_err("Acquire device failed\n");
1108 goto free_res;
1109 }
1110
1111 ctx_isp->hw_ctx = param.ctxt_to_hw_map;
1112
1113 req_hdl_param.session_hdl = cmd->session_handle;
1114 /* bridge is not ready for these flags. so false for now */
1115 req_hdl_param.v4l2_sub_dev_flag = 0;
1116 req_hdl_param.media_entity_flag = 0;
1117 req_hdl_param.ops = ctx->crm_ctx_intf;
1118 req_hdl_param.priv = ctx;
1119
1120 CDBG("%s: get device handle form bridge\n", __func__);
1121 ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
1122 if (ctx->dev_hdl <= 0) {
1123 rc = -EFAULT;
1124 pr_err("Can not create device handle\n");
1125 goto free_hw;
1126 }
1127 cmd->dev_handle = ctx->dev_hdl;
1128
1129 /* store session information */
1130 ctx->session_hdl = cmd->session_handle;
1131
1132 ctx->state = CAM_CTX_ACQUIRED;
1133
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001134 trace_cam_context_state("ISP", ctx);
Jing Zhoud352ed12017-03-20 23:59:56 -07001135 CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
1136 kfree(isp_res);
1137 return rc;
1138
1139free_hw:
1140 release.ctxt_to_hw_map = ctx_isp->hw_ctx;
1141 ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
1142 ctx_isp->hw_ctx = NULL;
1143free_res:
1144 kfree(isp_res);
1145end:
1146 return rc;
1147}
1148
1149static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
1150 struct cam_config_dev_cmd *cmd)
1151{
1152 int rc = 0;
1153
1154 rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
1155
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001156 if (!rc && ctx->link_hdl) {
Jing Zhoud352ed12017-03-20 23:59:56 -07001157 ctx->state = CAM_CTX_READY;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001158 trace_cam_context_state("ISP", ctx);
1159 }
Jing Zhoud352ed12017-03-20 23:59:56 -07001160
1161 CDBG("%s: next state %d\n", __func__, ctx->state);
1162 return rc;
1163}
1164
1165static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
1166 struct cam_req_mgr_core_dev_link_setup *link)
1167{
1168 int rc = 0;
1169
1170 CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
1171
1172 ctx->link_hdl = link->link_hdl;
1173 ctx->ctx_crm_intf = link->crm_cb;
1174
1175 /* change state only if we had the init config */
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001176 if (!list_empty(&ctx->pending_req_list)) {
Jing Zhoud352ed12017-03-20 23:59:56 -07001177 ctx->state = CAM_CTX_READY;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001178 trace_cam_context_state("ISP", ctx);
1179 }
Jing Zhoud352ed12017-03-20 23:59:56 -07001180
1181 CDBG("%s: next state %d\n", __func__, ctx->state);
1182
1183 return rc;
1184}
1185
1186static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
1187 struct cam_req_mgr_core_dev_link_setup *unlink)
1188{
1189 int rc = 0;
1190
1191 ctx->link_hdl = 0;
1192 ctx->ctx_crm_intf = NULL;
1193
1194 return rc;
1195}
1196
1197static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
1198 struct cam_req_mgr_device_info *dev_info)
1199{
1200 int rc = 0;
1201
1202 dev_info->dev_hdl = ctx->dev_hdl;
1203 strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
1204 dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
1205 dev_info->p_delay = 1;
1206
1207 return rc;
1208}
1209
1210static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
1211 struct cam_start_stop_dev_cmd *cmd)
1212{
1213 int rc = 0;
1214 struct cam_hw_start_args arg;
1215 struct cam_ctx_request *req;
1216 struct cam_isp_ctx_req *req_isp;
1217 struct cam_isp_context *ctx_isp =
1218 (struct cam_isp_context *) ctx->ctx_priv;
1219
1220 if (cmd->session_handle != ctx->session_hdl ||
1221 cmd->dev_handle != ctx->dev_hdl) {
1222 rc = -EPERM;
1223 goto end;
1224 }
1225
1226 if (list_empty(&ctx->pending_req_list)) {
1227 /* should never happen */
1228 pr_err("%s: Start device with empty configuration\n",
1229 __func__);
1230 rc = -EFAULT;
1231 goto end;
1232 } else {
1233 req = list_first_entry(&ctx->pending_req_list,
1234 struct cam_ctx_request, list);
1235 }
1236 req_isp = (struct cam_isp_ctx_req *) req->req_priv;
1237
1238 if (!ctx_isp->hw_ctx) {
1239 pr_err("%s:%d: Wrong hw context pointer.\n",
1240 __func__, __LINE__);
1241 rc = -EFAULT;
1242 goto end;
1243 }
1244 arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
1245 arg.hw_update_entries = req_isp->cfg;
1246 arg.num_hw_update_entries = req_isp->num_cfg;
1247
1248 ctx_isp->frame_id = 0;
Jing Zhoudedc4762017-06-19 17:45:36 +05301249 ctx_isp->active_req_cnt = 0;
1250 ctx_isp->reported_req_id = 0;
Jing Zhoud352ed12017-03-20 23:59:56 -07001251 ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
1252
1253 /*
1254 * Only place to change state before calling the hw due to
1255 * hardware tasklet has higher priority that can cause the
1256 * irq handling comes early
1257 */
1258 ctx->state = CAM_CTX_ACTIVATED;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001259 trace_cam_context_state("ISP", ctx);
Jing Zhoud352ed12017-03-20 23:59:56 -07001260 rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
1261 if (rc) {
1262 /* HW failure. user need to clean up the resource */
1263 pr_err("Start HW failed\n");
1264 ctx->state = CAM_CTX_READY;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001265 trace_cam_context_state("ISP", ctx);
Jing Zhoud352ed12017-03-20 23:59:56 -07001266 goto end;
1267 }
1268 CDBG("%s: start device success\n", __func__);
1269end:
1270 return rc;
1271}
1272
1273static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
1274 struct cam_req_mgr_core_dev_link_setup *unlink)
1275{
1276 int rc = 0;
1277
1278 ctx->link_hdl = 0;
1279 ctx->ctx_crm_intf = NULL;
1280 ctx->state = CAM_CTX_ACQUIRED;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001281 trace_cam_context_state("ISP", ctx);
Jing Zhoud352ed12017-03-20 23:59:56 -07001282
1283 return rc;
1284}
1285
1286static int __cam_isp_ctx_stop_dev_in_activated_unlock(
1287 struct cam_context *ctx)
1288{
1289 int rc = 0;
1290 uint32_t i;
1291 struct cam_hw_stop_args stop;
1292 struct cam_ctx_request *req;
1293 struct cam_isp_ctx_req *req_isp;
1294 struct cam_isp_context *ctx_isp =
1295 (struct cam_isp_context *) ctx->ctx_priv;
1296
1297 /* Mask off all the incoming hardware events */
Jing Zhou93b3ec12017-06-15 17:43:39 -07001298 spin_lock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001299 ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
Jing Zhou93b3ec12017-06-15 17:43:39 -07001300 spin_unlock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001301 CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
1302
1303 /* stop hw first */
1304 if (ctx_isp->hw_ctx) {
1305 stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
1306 ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
1307 &stop);
1308 }
1309
1310 while (!list_empty(&ctx->pending_req_list)) {
1311 req = list_first_entry(&ctx->pending_req_list,
1312 struct cam_ctx_request, list);
1313 list_del_init(&req->list);
1314 req_isp = (struct cam_isp_ctx_req *) req->req_priv;
1315 CDBG("%s: signal fence in pending list. fence num %d\n",
1316 __func__, req_isp->num_fence_map_out);
1317 for (i = 0; i < req_isp->num_fence_map_out; i++)
1318 if (req_isp->fence_map_out[i].sync_id != -1) {
1319 cam_sync_signal(
1320 req_isp->fence_map_out[i].sync_id,
1321 CAM_SYNC_STATE_SIGNALED_ERROR);
1322 }
1323 list_add_tail(&req->list, &ctx->free_req_list);
1324 }
1325
1326 while (!list_empty(&ctx->active_req_list)) {
1327 req = list_first_entry(&ctx->active_req_list,
1328 struct cam_ctx_request, list);
1329 list_del_init(&req->list);
1330 req_isp = (struct cam_isp_ctx_req *) req->req_priv;
1331 CDBG("%s: signal fence in active list. fence num %d\n",
1332 __func__, req_isp->num_fence_map_out);
1333 for (i = 0; i < req_isp->num_fence_map_out; i++)
1334 if (req_isp->fence_map_out[i].sync_id != -1) {
1335 cam_sync_signal(
1336 req_isp->fence_map_out[i].sync_id,
1337 CAM_SYNC_STATE_SIGNALED_ERROR);
1338 }
1339 list_add_tail(&req->list, &ctx->free_req_list);
1340 }
1341 ctx_isp->frame_id = 0;
Jing Zhoudedc4762017-06-19 17:45:36 +05301342 ctx_isp->active_req_cnt = 0;
1343 ctx_isp->reported_req_id = 0;
Jing Zhoud352ed12017-03-20 23:59:56 -07001344
1345 CDBG("%s: next state %d", __func__, ctx->state);
1346 return rc;
1347}
1348
1349static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
1350 struct cam_start_stop_dev_cmd *cmd)
1351{
1352 int rc = 0;
1353
1354 __cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
1355 ctx->state = CAM_CTX_ACQUIRED;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001356 trace_cam_context_state("ISP", ctx);
Jing Zhoud352ed12017-03-20 23:59:56 -07001357 return rc;
1358}
1359
1360static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
1361 struct cam_release_dev_cmd *cmd)
1362{
1363 int rc = 0;
1364 struct cam_isp_context *ctx_isp =
1365 (struct cam_isp_context *) ctx->ctx_priv;
1366
1367 __cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
1368
1369 if (ctx_isp->hw_ctx) {
1370 struct cam_hw_release_args arg;
1371
1372 arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
1373 ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
1374 &arg);
1375 ctx_isp->hw_ctx = NULL;
1376 }
1377
1378 ctx->session_hdl = 0;
1379 ctx->dev_hdl = 0;
1380 ctx->link_hdl = 0;
1381 ctx->ctx_crm_intf = NULL;
1382
1383 ctx->state = CAM_CTX_AVAILABLE;
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001384 trace_cam_context_state("ISP", ctx);
Jing Zhoud352ed12017-03-20 23:59:56 -07001385
1386 return rc;
1387}
1388
1389static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
1390 struct cam_req_mgr_apply_request *apply)
1391{
1392 int rc = 0;
1393 struct cam_isp_context *ctx_isp =
1394 (struct cam_isp_context *) ctx->ctx_priv;
1395
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001396 trace_cam_apply_req("ISP", apply);
Jing Zhoudedc4762017-06-19 17:45:36 +05301397 CDBG("%s: Enter: apply req in Substate %d request _id:%lld\n",
1398 __func__, ctx_isp->substate_activated, apply->request_id);
Jing Zhoud352ed12017-03-20 23:59:56 -07001399 if (ctx_isp->substate_machine[ctx_isp->substate_activated].
1400 crm_ops.apply_req) {
1401 rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
1402 crm_ops.apply_req(ctx, apply);
1403 } else {
1404 pr_err("%s: No handle function in activated substate %d\n",
1405 __func__, ctx_isp->substate_activated);
1406 rc = -EFAULT;
1407 }
1408
1409 if (rc)
1410 pr_err("%s: Apply failed in active substate %d\n",
1411 __func__, ctx_isp->substate_activated);
1412 return rc;
1413}
1414
1415
1416
1417static int __cam_isp_ctx_handle_irq_in_activated(void *context,
1418 uint32_t evt_id, void *evt_data)
1419{
1420 int rc = 0;
1421 struct cam_context *ctx = (struct cam_context *)context;
1422 struct cam_isp_context *ctx_isp =
1423 (struct cam_isp_context *)ctx->ctx_priv;
1424
Jing Zhou93b3ec12017-06-15 17:43:39 -07001425 spin_lock_bh(&ctx->lock);
Gregory Bergschneider5164f3a2017-07-07 10:26:17 -06001426
1427 trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
1428 __cam_isp_ctx_get_event_ts(evt_id, evt_data));
1429
Jing Zhou9eabf472017-05-16 11:59:41 -07001430 CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
1431 __func__, ctx->state, ctx_isp->substate_activated, evt_id);
Jing Zhoud352ed12017-03-20 23:59:56 -07001432 if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
1433 irq_ops[evt_id]) {
1434 rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
1435 irq_ops[evt_id](ctx_isp, evt_data);
1436 } else {
1437 CDBG("%s: No handle function for substate %d\n", __func__,
1438 ctx_isp->substate_activated);
1439 }
1440 CDBG("%s: Exit: State %d Substate %d\n",
1441 __func__, ctx->state, ctx_isp->substate_activated);
Jing Zhou93b3ec12017-06-15 17:43:39 -07001442 spin_unlock_bh(&ctx->lock);
Jing Zhoud352ed12017-03-20 23:59:56 -07001443 return rc;
1444}
1445
/*
 * Top-level state machine table, indexed by the cam_context state
 * (UNINIT / AVAILABLE / ACQUIRED / READY / ACTIVATED). Each entry lists
 * the ioctl, CRM and IRQ handlers that are valid in that state; entries
 * left unset mean the operation is not supported there (presumably
 * rejected by the generic cam_context layer — verify in cam_context.c).
 */
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
		},
		.irq_ops = NULL,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.flush_req = __cam_isp_ctx_flush_req_in_ready,
		},
		.irq_ops = NULL,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
		},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
		},
		/* Only the activated state receives HW events directly */
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
	},
};
1504
1505
1506int cam_isp_context_init(struct cam_isp_context *ctx,
1507 struct cam_context *ctx_base,
1508 struct cam_req_mgr_kmd_ops *crm_node_intf,
1509 struct cam_hw_mgr_intf *hw_intf)
1510
1511{
1512 int rc = -1;
1513 int i;
1514
1515 if (!ctx || !ctx_base) {
1516 pr_err("%s: Invalid Context\n", __func__);
1517 goto err;
1518 }
1519
1520 /* ISP context setup */
1521 memset(ctx, 0, sizeof(*ctx));
1522
1523 ctx->base = ctx_base;
1524 ctx->frame_id = 0;
Jing Zhoudedc4762017-06-19 17:45:36 +05301525 ctx->active_req_cnt = 0;
1526 ctx->reported_req_id = 0;
Jing Zhoud352ed12017-03-20 23:59:56 -07001527 ctx->hw_ctx = NULL;
1528 ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
1529 ctx->substate_machine = cam_isp_ctx_activated_state_machine;
1530 ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
1531
1532 for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
1533 ctx->req_base[i].req_priv = &ctx->req_isp[i];
1534 ctx->req_isp[i].base = &ctx->req_base[i];
1535 }
1536
1537 /* camera context setup */
1538 rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
1539 CAM_CTX_REQ_MAX);
1540 if (rc) {
1541 pr_err("%s: Camera Context Base init failed\n", __func__);
1542 goto err;
1543 }
1544
1545 /* link camera context with isp context */
1546 ctx_base->state_machine = cam_isp_ctx_top_state_machine;
1547 ctx_base->ctx_priv = ctx;
1548
1549err:
1550 return rc;
1551}
1552
1553int cam_isp_context_deinit(struct cam_isp_context *ctx)
1554{
1555 int rc = 0;
1556
1557 if (ctx->base)
1558 cam_context_deinit(ctx->base);
1559
1560 if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
1561 pr_err("%s: ISP context substate is invalid\n", __func__);
1562
1563 memset(ctx, 0, sizeof(*ctx));
1564 return rc;
1565}
1566