blob: 91e1e74f7360b2b8cff0ad86aa411459f108a5de [file] [log] [blame]
Raja Mallikff6c75b2019-01-29 16:52:37 +05301/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
Raja Mallikc7e256f2018-12-06 17:36:28 +05302 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/string.h>
15#include <linux/uaccess.h>
16#include <linux/debugfs.h>
17#include <soc/qcom/scm.h>
18#include <uapi/media/cam_isp.h>
19#include "cam_smmu_api.h"
20#include "cam_req_mgr_workq.h"
21#include "cam_isp_hw_mgr_intf.h"
22#include "cam_isp_hw.h"
23#include "cam_ife_csid_hw_intf.h"
24#include "cam_vfe_hw_intf.h"
25#include "cam_isp_packet_parser.h"
26#include "cam_ife_hw_mgr.h"
27#include "cam_cdm_intf_api.h"
28#include "cam_packet_util.h"
29#include "cam_debug_util.h"
30#include "cam_cpas_api.h"
31#include "cam_mem_mgr_api.h"
32#include "cam_common_util.h"
33
/* Max number of hw update entries tracked per IFE config */
#define CAM_IFE_HW_ENTRIES_MAX  20

/* TrustZone SCM service/command used to program the SMMU safe LUT */
#define TZ_SVC_SMMU_PROGRAM 0x15
#define TZ_SAFE_SYSCALL_ID  0x3
/* Arguments for the safe LUT SCM call */
#define CAM_IFE_SAFE_DISABLE 0
#define CAM_IFE_SAFE_ENABLE 1
/* SMMU stream/security identifier for the IFE — passed as SCM arg 0 */
#define SMMU_SE_IFE 0

/* Number of packet meta types; GENERIC_BLOB_COMMON is the last enum value */
#define CAM_ISP_PACKET_META_MAX \
	(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)

/* Number of generic blob types; BW_CONFIG_V2 is the last enum value */
#define CAM_ISP_GENERIC_BLOB_TYPE_MAX \
	(CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2 + 1)
/*
 * Lookup table translating a generic blob type (array index, one of the
 * CAM_ISP_GENERIC_BLOB_TYPE_* values) to the hw-layer command issued for it.
 * NOTE(review): the table is sized CAM_ISP_GENERIC_BLOB_TYPE_MAX but lists
 * fewer initializers; the remaining slots are zero-initialized — presumably
 * those blob types are handled elsewhere or rejected before lookup. TODO
 * confirm against the generic-blob handler.
 */
static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
	CAM_ISP_HW_CMD_CLOCK_UPDATE,
	CAM_ISP_HW_CMD_BW_UPDATE,
	CAM_ISP_HW_CMD_UBWC_UPDATE,
	CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
	CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG,
};

/* Single driver-wide IFE hw manager instance; ctx lists guarded by ctx_mutex */
static struct cam_ife_hw_mgr g_ife_hw_mgr;
58
59static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
60{
61 uint32_t camera_hw_version, rc = 0;
62 struct scm_desc desc = {0};
63
64 rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
65 if (!rc) {
66 switch (camera_hw_version) {
67 case CAM_CPAS_TITAN_170_V100:
68 case CAM_CPAS_TITAN_170_V110:
69 case CAM_CPAS_TITAN_175_V100:
70
71 desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
72 desc.args[0] = SMMU_SE_IFE;
73 desc.args[1] = safe_trigger;
74
75 CAM_DBG(CAM_ISP, "Safe scm call %d", safe_trigger);
76 if (scm_call2(SCM_SIP_FNID(TZ_SVC_SMMU_PROGRAM,
77 TZ_SAFE_SYSCALL_ID), &desc)) {
78 CAM_ERR(CAM_ISP,
79 "scm call to Enable Safe failed");
80 rc = -EINVAL;
81 }
82 break;
83 default:
84 break;
85 }
86 }
87
88 return rc;
89}
90
91static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
92 void *hw_caps_args)
93{
94 int rc = 0;
95 int i;
96 struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
97 struct cam_query_cap_cmd *query = hw_caps_args;
98 struct cam_isp_query_cap_cmd query_isp;
99
100 CAM_DBG(CAM_ISP, "enter");
101
102 if (copy_from_user(&query_isp,
103 u64_to_user_ptr(query->caps_handle),
104 sizeof(struct cam_isp_query_cap_cmd))) {
105 rc = -EFAULT;
106 return rc;
107 }
108
109 query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl;
110 query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure;
111 query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl;
112 query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure;
113 query_isp.num_dev = 2;
114 for (i = 0; i < query_isp.num_dev; i++) {
115 query_isp.dev_caps[i].hw_type = CAM_ISP_HW_IFE;
116 query_isp.dev_caps[i].hw_version.major = 1;
117 query_isp.dev_caps[i].hw_version.minor = 7;
118 query_isp.dev_caps[i].hw_version.incr = 0;
119 query_isp.dev_caps[i].hw_version.reserved = 0;
120 }
121
122 if (copy_to_user(u64_to_user_ptr(query->caps_handle),
123 &query_isp, sizeof(struct cam_isp_query_cap_cmd)))
124 rc = -EFAULT;
125
126 CAM_DBG(CAM_ISP, "exit rc :%d", rc);
127
128 return rc;
129}
130
131static int cam_ife_hw_mgr_is_rdi_res(uint32_t res_id)
132{
133 int rc = 0;
134
135 switch (res_id) {
136 case CAM_ISP_IFE_OUT_RES_RDI_0:
137 case CAM_ISP_IFE_OUT_RES_RDI_1:
138 case CAM_ISP_IFE_OUT_RES_RDI_2:
139 case CAM_ISP_IFE_OUT_RES_RDI_3:
140 rc = 1;
141 break;
142 default:
143 break;
144 }
145
146 return rc;
147}
148
Raja Mallike3ed1a32019-08-22 17:12:32 +0530149static const char *cam_ife_hw_mgr_get_res_id(
150 enum cam_ife_pix_path_res_id csid_res_id)
151{
152 char *res_name = NULL;
153
154 switch (csid_res_id) {
155 case CAM_IFE_PIX_PATH_RES_RDI_0:
156 res_name = "RDI_0";
157 break;
158 case CAM_IFE_PIX_PATH_RES_RDI_1:
159 res_name = "RDI_1";
160 break;
161 case CAM_IFE_PIX_PATH_RES_RDI_2:
162 res_name = "RDI_2";
163 break;
164 case CAM_IFE_PIX_PATH_RES_RDI_3:
165 res_name = "RDI_3";
166 break;
167 case CAM_IFE_PIX_PATH_RES_IPP:
168 res_name = "IPP";
169 break;
170 case CAM_IFE_PIX_PATH_RES_PPP:
171 res_name = "PPP";
172 break;
173 case CAM_IFE_PIX_PATH_RES_MAX:
174 res_name = "Invalid Max res";
175 break;
176 default:
177 res_name = "Invalid";
178 break;
179 }
180 return res_name;
181}
182
183static const char *cam_ife_hw_mgr_get_res_type(
184 enum cam_isp_resource_type csid_res_type)
185{
186 char *res_type = NULL;
187
188 switch (csid_res_type) {
189 case CAM_ISP_RESOURCE_UNINT:
190 res_type = "Unint";
191 break;
192 case CAM_ISP_RESOURCE_SRC:
193 res_type = "Src";
194 break;
195 case CAM_ISP_RESOURCE_CID:
196 res_type = "Cid";
197 break;
198 case CAM_ISP_RESOURCE_PIX_PATH:
199 res_type = "Pix Path";
200 break;
201 case CAM_ISP_RESOURCE_VFE_IN:
202 res_type = "Vfe In";
203 break;
204 case CAM_ISP_RESOURCE_VFE_OUT:
205 res_type = "Vfe Out";
206 break;
207 case CAM_ISP_RESOURCE_MAX:
208 res_type = "Invalid Max res";
209 break;
210 default:
211 res_type = "Invalid";
212 break;
213 }
214 return res_type;
215}
216
Raja Mallikc7e256f2018-12-06 17:36:28 +0530217static int cam_ife_hw_mgr_reset_csid_res(
218 struct cam_ife_hw_mgr_res *isp_hw_res)
219{
220 int i;
221 int rc = 0;
222 struct cam_hw_intf *hw_intf;
223 struct cam_csid_reset_cfg_args csid_reset_args;
224
225 csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH;
226
227 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
228 if (!isp_hw_res->hw_res[i])
229 continue;
230 csid_reset_args.node_res = isp_hw_res->hw_res[i];
231 hw_intf = isp_hw_res->hw_res[i]->hw_intf;
232 CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
233 hw_intf->hw_idx);
234 if (hw_intf->hw_ops.reset) {
235 rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
236 &csid_reset_args,
237 sizeof(struct cam_csid_reset_cfg_args));
238 if (rc <= 0)
239 goto err;
240 }
241 }
242
243 return 0;
244err:
245 CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
246 isp_hw_res->res_type, isp_hw_res->res_id);
247 return rc;
248}
249
250static int cam_ife_hw_mgr_init_hw_res(
251 struct cam_ife_hw_mgr_res *isp_hw_res)
252{
253 int i;
254 int rc = -1;
255 struct cam_hw_intf *hw_intf;
256
257 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
258 if (!isp_hw_res->hw_res[i])
259 continue;
260 hw_intf = isp_hw_res->hw_res[i]->hw_intf;
261 CAM_DBG(CAM_ISP, "enabled vfe hardware %d",
262 hw_intf->hw_idx);
263 if (hw_intf->hw_ops.init) {
264 rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
265 isp_hw_res->hw_res[i],
266 sizeof(struct cam_isp_resource_node));
267 if (rc)
268 goto err;
269 }
270 }
271
272 return 0;
273err:
274 CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)",
275 isp_hw_res->res_type, isp_hw_res->res_id);
276 return rc;
277}
278
279static int cam_ife_hw_mgr_start_hw_res(
280 struct cam_ife_hw_mgr_res *isp_hw_res,
281 struct cam_ife_hw_mgr_ctx *ctx)
282{
283 int i;
284 int rc = -1;
285 struct cam_hw_intf *hw_intf;
286
287 /* Start slave (which is right split) first */
288 for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
289 if (!isp_hw_res->hw_res[i])
290 continue;
291 hw_intf = isp_hw_res->hw_res[i]->hw_intf;
292 if (hw_intf->hw_ops.start) {
293 isp_hw_res->hw_res[i]->rdi_only_ctx =
294 ctx->is_rdi_only_context;
295 rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
296 isp_hw_res->hw_res[i],
297 sizeof(struct cam_isp_resource_node));
298 if (rc) {
299 CAM_ERR(CAM_ISP, "Can not start HW resources");
300 goto err;
301 }
302 CAM_DBG(CAM_ISP, "Start HW %d Res %d", hw_intf->hw_idx,
303 isp_hw_res->hw_res[i]->res_id);
304 } else {
305 CAM_ERR(CAM_ISP, "function null");
306 goto err;
307 }
308 }
309
310 return 0;
311err:
312 CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)",
313 isp_hw_res->res_type, isp_hw_res->res_id);
314 return rc;
315}
316
317static void cam_ife_hw_mgr_stop_hw_res(
318 struct cam_ife_hw_mgr_res *isp_hw_res)
319{
320 int i;
321 struct cam_hw_intf *hw_intf;
322 uint32_t dummy_args;
323
324 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
325 if (!isp_hw_res->hw_res[i])
326 continue;
327 hw_intf = isp_hw_res->hw_res[i]->hw_intf;
328 if (hw_intf->hw_ops.stop)
329 hw_intf->hw_ops.stop(hw_intf->hw_priv,
330 isp_hw_res->hw_res[i],
331 sizeof(struct cam_isp_resource_node));
332 else
333 CAM_ERR(CAM_ISP, "stop null");
334 if (hw_intf->hw_ops.process_cmd &&
335 isp_hw_res->res_type == CAM_IFE_HW_MGR_RES_IFE_OUT) {
336 hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
337 CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
338 &dummy_args, sizeof(dummy_args));
339 }
340 }
341}
342
343static void cam_ife_hw_mgr_deinit_hw_res(
344 struct cam_ife_hw_mgr_res *isp_hw_res)
345{
346 int i;
347 struct cam_hw_intf *hw_intf;
348
349 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
350 if (!isp_hw_res->hw_res[i])
351 continue;
352 hw_intf = isp_hw_res->hw_res[i]->hw_intf;
353 if (hw_intf->hw_ops.deinit)
354 hw_intf->hw_ops.deinit(hw_intf->hw_priv,
355 isp_hw_res->hw_res[i],
356 sizeof(struct cam_isp_resource_node));
357 }
358}
359
360static void cam_ife_hw_mgr_deinit_hw(
361 struct cam_ife_hw_mgr_ctx *ctx)
362{
363 struct cam_ife_hw_mgr_res *hw_mgr_res;
364 int i = 0;
365
366 if (!ctx->init_done) {
367 CAM_WARN(CAM_ISP, "ctx is not in init state");
368 return;
369 }
370
371 /* Deinit IFE CID */
372 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
373 CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
374 cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
375 }
376
377 /* Deinit IFE CSID */
378 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
379 CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n", __func__);
380 cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
381 }
382
383 /* Deint IFE MUX(SRC) */
384 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
385 cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
386 }
387
Raja Mallikfe46d932019-02-12 20:34:07 +0530388 /* Deint IFE RD */
389 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
390 cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
391 }
392
Raja Mallikc7e256f2018-12-06 17:36:28 +0530393 /* Deinit IFE OUT */
394 for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
395 cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
396
397 ctx->init_done = false;
398}
399
400static int cam_ife_hw_mgr_init_hw(
401 struct cam_ife_hw_mgr_ctx *ctx)
402{
403 struct cam_ife_hw_mgr_res *hw_mgr_res;
404 int rc = 0, i;
405
406 CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
407 ctx->ctx_index);
408 /* INIT IFE CID */
409 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
410 rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
411 if (rc) {
412 CAM_ERR(CAM_ISP, "Can not INIT IFE CID(id :%d)",
413 hw_mgr_res->res_id);
414 goto deinit;
415 }
416 }
417
418 CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
419 ctx->ctx_index);
420
421 /* INIT IFE csid */
422 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
423 rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
424 if (rc) {
425 CAM_ERR(CAM_ISP, "Can not INIT IFE CSID(id :%d)",
426 hw_mgr_res->res_id);
427 goto deinit;
428 }
429 }
430
431 /* INIT IFE SRC */
432 CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
433 ctx->ctx_index);
434 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
435 rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
436 if (rc) {
437 CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
438 hw_mgr_res->res_id);
439 goto deinit;
440 }
441 }
442
Raja Mallikfe46d932019-02-12 20:34:07 +0530443 /* INIT IFE BUS RD */
444 CAM_DBG(CAM_ISP, "INIT IFE BUS RD in ctx id:%d",
445 ctx->ctx_index);
446 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
447 rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
448 if (rc) {
449 CAM_ERR(CAM_ISP, "Can not IFE BUS RD (%d)",
450 hw_mgr_res->res_id);
451 return rc;
452 }
453 }
454
Raja Mallikc7e256f2018-12-06 17:36:28 +0530455 /* INIT IFE OUT */
456 CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
457 ctx->ctx_index);
458
459 for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
460 rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
461 if (rc) {
462 CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
463 ctx->res_list_ife_out[i].res_id);
464 goto deinit;
465 }
466 }
467
468 return rc;
469deinit:
470 ctx->init_done = true;
471 cam_ife_hw_mgr_deinit_hw(ctx);
472 return rc;
473}
474
475static int cam_ife_hw_mgr_put_res(
476 struct list_head *src_list,
477 struct cam_ife_hw_mgr_res **res)
478{
479 int rc = 0;
480 struct cam_ife_hw_mgr_res *res_ptr = NULL;
481
482 res_ptr = *res;
483 if (res_ptr)
484 list_add_tail(&res_ptr->list, src_list);
485
486 return rc;
487}
488
489static int cam_ife_hw_mgr_get_res(
490 struct list_head *src_list,
491 struct cam_ife_hw_mgr_res **res)
492{
493 int rc = 0;
494 struct cam_ife_hw_mgr_res *res_ptr = NULL;
495
496 if (!list_empty(src_list)) {
497 res_ptr = list_first_entry(src_list,
498 struct cam_ife_hw_mgr_res, list);
499 list_del_init(&res_ptr->list);
500 } else {
501 CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
502 rc = -1;
503 }
504 *res = res_ptr;
505
506 return rc;
507}
508
509static int cam_ife_hw_mgr_free_hw_res(
510 struct cam_ife_hw_mgr_res *isp_hw_res)
511{
512 int rc = 0;
513 int i;
514 struct cam_hw_intf *hw_intf;
515
516 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
517 if (!isp_hw_res->hw_res[i])
518 continue;
519 hw_intf = isp_hw_res->hw_res[i]->hw_intf;
520 if (hw_intf->hw_ops.release) {
521 rc = hw_intf->hw_ops.release(hw_intf->hw_priv,
522 isp_hw_res->hw_res[i],
523 sizeof(struct cam_isp_resource_node));
524 if (rc)
525 CAM_ERR(CAM_ISP,
526 "Release hw resource id %d failed",
527 isp_hw_res->res_id);
528 isp_hw_res->hw_res[i] = NULL;
529 } else
530 CAM_ERR(CAM_ISP, "Release null");
531 }
532 /* caller should make sure the resource is in a list */
533 list_del_init(&isp_hw_res->list);
534 memset(isp_hw_res, 0, sizeof(*isp_hw_res));
535 INIT_LIST_HEAD(&isp_hw_res->list);
536
537 return 0;
538}
539
/*
 * Bulk-stop all CSID path resources in @stop_list that belong to the CSID
 * hw whose index is @base_idx. The matching resources are collected first,
 * then stopped with a single hw stop call so the hw can sequence them
 * together.
 *
 * @ctx:       owning hw mgr context (currently unused in the body)
 * @stop_list: list of cam_ife_hw_mgr_res to scan
 * @base_idx:  hw index of the CSID whose paths should stop
 * @stop_cmd:  stop mode passed through in cam_csid_hw_stop_args
 *
 * NOTE(review): stop_res holds at most CAM_IFE_PIX_PATH_RES_MAX - 1
 * entries — this assumes a single CSID never contributes more matching
 * path resources than that across the list; TODO confirm the invariant.
 *
 * Always returns 0.
 */
static int cam_ife_mgr_csid_stop_hw(
	struct cam_ife_hw_mgr_ctx *ctx, struct list_head *stop_list,
	uint32_t base_idx, uint32_t stop_cmd)
{
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_isp_resource_node *isp_res;
	struct cam_isp_resource_node *stop_res[CAM_IFE_PIX_PATH_RES_MAX - 1];
	struct cam_csid_hw_stop_args stop;
	struct cam_hw_intf *hw_intf;
	uint32_t i, cnt;

	/* Gather every split of every res on this CSID into stop_res[] */
	cnt = 0;
	list_for_each_entry(hw_mgr_res, stop_list, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			isp_res = hw_mgr_res->hw_res[i];
			if (isp_res->hw_intf->hw_idx != base_idx)
				continue;
			CAM_DBG(CAM_ISP, "base_idx %d res_id %d cnt %u",
				base_idx, isp_res->res_id, cnt);
			stop_res[cnt] = isp_res;
			cnt++;
		}
	}

	if (cnt) {
		/* All collected nodes share the same CSID hw interface */
		hw_intf = stop_res[0]->hw_intf;
		stop.num_res = cnt;
		stop.node_res = stop_res;
		stop.stop_cmd = stop_cmd;
		hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop));
	}

	return 0;
}
577
/*
 * Release every hw resource held by @ife_ctx and return the res wrappers
 * to the context's free list. Teardown runs leaf-to-root: OUT -> BUS RD
 * -> SRC mux -> CSID -> CID -> root IN node, so children are always freed
 * before their parents. Also clears the event callbacks registered on the
 * context. Always returns 0.
 */
static int cam_ife_hw_mgr_release_hw_for_ctx(
	struct cam_ife_hw_mgr_ctx *ife_ctx)
{
	uint32_t i;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_ife_hw_mgr_res *hw_mgr_res_temp;

	/* ife leaf resource */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
		cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_out[i]);

	/* ife bus rd resource */
	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
		&ife_ctx->res_list_ife_in_rd, list) {
		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
	}

	/* ife source resource */
	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
		&ife_ctx->res_list_ife_src, list) {
		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
	}

	/* ife csid resource */
	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
		&ife_ctx->res_list_ife_csid, list) {
		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
	}

	/* ife cid resource */
	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
		&ife_ctx->res_list_ife_cid, list) {
		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
	}

	/* ife root node — only present once an input was acquired */
	if (ife_ctx->res_list_ife_in.res_type != CAM_IFE_HW_MGR_RES_UNINIT)
		cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_in);

	/* clean up the callback function */
	ife_ctx->common.cb_priv = NULL;
	memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));

	CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
		ife_ctx->ctx_index);

	return 0;
}
630
631
632static int cam_ife_hw_mgr_put_ctx(
633 struct list_head *src_list,
634 struct cam_ife_hw_mgr_ctx **ife_ctx)
635{
636 int rc = 0;
637 struct cam_ife_hw_mgr_ctx *ctx_ptr = NULL;
638
639 mutex_lock(&g_ife_hw_mgr.ctx_mutex);
640 ctx_ptr = *ife_ctx;
641 if (ctx_ptr)
642 list_add_tail(&ctx_ptr->list, src_list);
643 *ife_ctx = NULL;
644 mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
645 return rc;
646}
647
648static int cam_ife_hw_mgr_get_ctx(
649 struct list_head *src_list,
650 struct cam_ife_hw_mgr_ctx **ife_ctx)
651{
652 int rc = 0;
653 struct cam_ife_hw_mgr_ctx *ctx_ptr = NULL;
654
655 mutex_lock(&g_ife_hw_mgr.ctx_mutex);
656 if (!list_empty(src_list)) {
657 ctx_ptr = list_first_entry(src_list,
658 struct cam_ife_hw_mgr_ctx, list);
659 list_del_init(&ctx_ptr->list);
660 } else {
661 CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
662 rc = -1;
663 }
664 *ife_ctx = ctx_ptr;
665 mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
666
667 return rc;
668}
669
Raja Mallike3ed1a32019-08-22 17:12:32 +0530670static void cam_ife_hw_mgr_dump_all_ctx(
671 struct cam_ife_hw_mgr_ctx *ife_ctx)
672{
673 uint32_t i;
674 struct cam_ife_hw_mgr_ctx *ctx;
675 struct cam_ife_hw_mgr_res *hw_mgr;
676
677 mutex_lock(&g_ife_hw_mgr.ctx_mutex);
678 list_for_each_entry(ctx, &g_ife_hw_mgr.used_ctx_list, list) {
679 CAM_ERR_RATE_LIMIT(CAM_ISP,
680 "ctx id:%d dual:%d in src:%d num_base:%d rdi only:%d",
681 ctx->ctx_index,
682 ctx->res_list_ife_in.is_dual_vfe,
683 ctx->res_list_ife_in.res_id,
684 ctx->num_base, ctx->is_rdi_only_context);
685 list_for_each_entry(hw_mgr, &ctx->res_list_ife_csid,
686 list) {
687 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
688 if (!hw_mgr->hw_res[i])
689 continue;
690 CAM_ERR_RATE_LIMIT(CAM_ISP,
691 "csid:%d res_type:%s id:%s state:%d",
692 hw_mgr->hw_res[i]->hw_intf->hw_idx,
693 cam_ife_hw_mgr_get_res_type(
694 hw_mgr->hw_res[i]->res_type),
695 cam_ife_hw_mgr_get_res_id(
696 hw_mgr->hw_res[i]->res_id),
697 hw_mgr->hw_res[i]->res_state);
698 }
699 }
700 list_for_each_entry(hw_mgr, &ctx->res_list_ife_src,
701 list) {
702 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
703 if (!hw_mgr->hw_res[i])
704 continue;
705 CAM_ERR_RATE_LIMIT(CAM_ISP,
706 "Src IFE:%d res_type:%s id:%s state:%d",
707 hw_mgr->hw_res[i]->hw_intf->hw_idx,
708 cam_ife_hw_mgr_get_res_type(
709 hw_mgr->hw_res[i]->res_type),
710 cam_ife_hw_mgr_get_res_id(
711 hw_mgr->hw_res[i]->res_id),
712 hw_mgr->hw_res[i]->res_state);
713 }
714 }
715 }
716 CAM_ERR_RATE_LIMIT(CAM_ISP,
717 "Current ctx id:%d dual:%d in src:%d num_base:%d rdi only:%d",
718 ife_ctx->ctx_index,
719 ife_ctx->res_list_ife_in.is_dual_vfe,
720 ife_ctx->res_list_ife_in.res_id,
721 ife_ctx->num_base, ife_ctx->is_rdi_only_context);
722 mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
723}
724
Raja Mallikc7e256f2018-12-06 17:36:28 +0530725static void cam_ife_mgr_add_base_info(
726 struct cam_ife_hw_mgr_ctx *ctx,
727 enum cam_isp_hw_split_id split_id,
728 uint32_t base_idx)
729{
730 uint32_t i;
731
732 if (!ctx->num_base) {
733 ctx->base[0].split_id = split_id;
734 ctx->base[0].idx = base_idx;
735 ctx->num_base++;
736 CAM_DBG(CAM_ISP,
737 "Add split id = %d for base idx = %d num_base=%d",
738 split_id, base_idx, ctx->num_base);
739 } else {
740 /*Check if base index already exists in the list */
741 for (i = 0; i < ctx->num_base; i++) {
742 if (ctx->base[i].idx == base_idx) {
743 if (split_id != CAM_ISP_HW_SPLIT_MAX &&
744 ctx->base[i].split_id ==
745 CAM_ISP_HW_SPLIT_MAX)
746 ctx->base[i].split_id = split_id;
747
748 break;
749 }
750 }
751
752 if (i == ctx->num_base) {
753 ctx->base[ctx->num_base].split_id = split_id;
754 ctx->base[ctx->num_base].idx = base_idx;
755 ctx->num_base++;
756 CAM_DBG(CAM_ISP,
757 "Add split_id=%d for base idx=%d num_base=%d",
758 split_id, base_idx, ctx->num_base);
759 }
760 }
761}
762
763static int cam_ife_mgr_process_base_info(
764 struct cam_ife_hw_mgr_ctx *ctx)
765{
766 struct cam_ife_hw_mgr_res *hw_mgr_res;
767 struct cam_isp_resource_node *res = NULL;
768 uint32_t i;
769
770 if (list_empty(&ctx->res_list_ife_src)) {
771 CAM_ERR(CAM_ISP, "Mux List empty");
772 return -ENODEV;
773 }
774
775 /* IFE mux in resources */
776 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
777 if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
778 continue;
779
780 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
781 if (!hw_mgr_res->hw_res[i])
782 continue;
783
784 res = hw_mgr_res->hw_res[i];
785 cam_ife_mgr_add_base_info(ctx, i,
786 res->hw_intf->hw_idx);
787 CAM_DBG(CAM_ISP, "add base info for hw %d",
788 res->hw_intf->hw_idx);
789 }
790 }
791 CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
792
793 return 0;
794}
795
Raja Mallikfe46d932019-02-12 20:34:07 +0530796static int cam_ife_hw_mgr_acquire_res_bus_rd(
797 struct cam_ife_hw_mgr_ctx *ife_ctx,
798 struct cam_isp_in_port_info *in_port)
799{
800 int rc = -EINVAL;
801 struct cam_vfe_acquire_args vfe_acquire;
802 struct cam_ife_hw_mgr_res *ife_in_rd_res;
803 struct cam_hw_intf *hw_intf;
804 struct cam_ife_hw_mgr_res *ife_src_res;
805 int i;
806
807 CAM_DBG(CAM_ISP, "Enter");
808
809 list_for_each_entry(ife_src_res, &ife_ctx->res_list_ife_src, list) {
810 if (ife_src_res->res_id != CAM_ISP_HW_VFE_IN_RD)
811 continue;
812
813 rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
814 &ife_in_rd_res);
815 if (rc) {
816 CAM_ERR(CAM_ISP, "No more free hw mgr resource");
817 goto err;
818 }
819 cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_in_rd,
820 &ife_in_rd_res);
821
822 vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_BUS_RD;
823 vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
824 vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
825 vfe_acquire.vfe_out.ctx = ife_ctx;
826 vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
827 vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_vfe;
828 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
829 if (!ife_src_res->hw_res[i])
830 continue;
831
832 hw_intf = ife_src_res->hw_res[i]->hw_intf;
833 if (i == CAM_ISP_HW_SPLIT_LEFT) {
834 vfe_acquire.vfe_out.split_id =
835 CAM_ISP_HW_SPLIT_LEFT;
836 if (ife_src_res->is_dual_vfe) {
837 /*TBD */
838 vfe_acquire.vfe_out.is_master = 1;
839 vfe_acquire.vfe_out.dual_slave_core =
840 (hw_intf->hw_idx == 0) ? 1 : 0;
841 } else {
842 vfe_acquire.vfe_out.is_master = 0;
843 vfe_acquire.vfe_out.dual_slave_core =
844 0;
845 }
846 } else {
847 vfe_acquire.vfe_out.split_id =
848 CAM_ISP_HW_SPLIT_RIGHT;
849 vfe_acquire.vfe_out.is_master = 0;
850 vfe_acquire.vfe_out.dual_slave_core =
851 (hw_intf->hw_idx == 0) ? 1 : 0;
852 }
853 rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
854 &vfe_acquire,
855 sizeof(struct cam_vfe_acquire_args));
856 if (rc) {
857 CAM_ERR(CAM_ISP,
858 "Can not acquire out resource 0x%x",
859 vfe_acquire.rsrc_type);
860 goto err;
861 }
862
863 ife_in_rd_res->hw_res[i] =
864 vfe_acquire.vfe_out.rsrc_node;
865 CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
866 ife_in_rd_res->hw_res[i]->res_type,
867 ife_in_rd_res->hw_res[i]->res_id);
868
869 }
870 ife_in_rd_res->is_dual_vfe = in_port->usage_type;
871 ife_in_rd_res->res_type = (enum cam_ife_hw_mgr_res_type)
872 CAM_ISP_RESOURCE_VFE_BUS_RD;
873 }
874
875 return 0;
876err:
877 CAM_DBG(CAM_ISP, "Exit rc(0x%x)", rc);
878 return rc;
879}
880
Raja Mallikc7e256f2018-12-06 17:36:28 +0530881static int cam_ife_hw_mgr_acquire_res_ife_out_rdi(
882 struct cam_ife_hw_mgr_ctx *ife_ctx,
883 struct cam_ife_hw_mgr_res *ife_src_res,
884 struct cam_isp_in_port_info *in_port)
885{
886 int rc = -EINVAL;
887 struct cam_vfe_acquire_args vfe_acquire;
888 struct cam_isp_out_port_info *out_port = NULL;
889 struct cam_ife_hw_mgr_res *ife_out_res;
890 struct cam_hw_intf *hw_intf;
891 uint32_t i, vfe_out_res_id, vfe_in_res_id;
892
893 /* take left resource */
894 vfe_in_res_id = ife_src_res->hw_res[0]->res_id;
895
896 switch (vfe_in_res_id) {
897 case CAM_ISP_HW_VFE_IN_RDI0:
898 vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_0;
899 break;
900 case CAM_ISP_HW_VFE_IN_RDI1:
901 vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_1;
902 break;
903 case CAM_ISP_HW_VFE_IN_RDI2:
904 vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_2;
905 break;
906 case CAM_ISP_HW_VFE_IN_RDI3:
907 vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
908 break;
909 default:
910 CAM_ERR(CAM_ISP, "invalid resource type");
911 goto err;
912 }
913 CAM_DBG(CAM_ISP, "vfe_in_res_id = %d, vfe_out_red_id = %d",
914 vfe_in_res_id, vfe_out_res_id);
915
916 vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
917 vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
918
919 ife_out_res = &ife_ctx->res_list_ife_out[vfe_out_res_id & 0xFF];
920 for (i = 0; i < in_port->num_out_res; i++) {
921 out_port = &in_port->data[i];
922
923 CAM_DBG(CAM_ISP, "i = %d, vfe_out_res_id = %d, out_port: %d",
924 i, vfe_out_res_id, out_port->res_type);
925
926 if (vfe_out_res_id != out_port->res_type)
927 continue;
928
929 vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
930 vfe_acquire.vfe_out.ctx = ife_ctx;
931 vfe_acquire.vfe_out.out_port_info = out_port;
932 vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
933 vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
934 vfe_acquire.vfe_out.is_dual = 0;
935 hw_intf = ife_src_res->hw_res[0]->hw_intf;
936 rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
937 &vfe_acquire,
938 sizeof(struct cam_vfe_acquire_args));
939 if (rc) {
940 CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
941 out_port->res_type);
942 goto err;
943 }
944 break;
945 }
946
947 if (i == in_port->num_out_res) {
948 CAM_ERR(CAM_ISP,
949 "Cannot acquire out resource, i=%d, num_out_res=%d",
950 i, in_port->num_out_res);
951 goto err;
952 }
953
954 ife_out_res->hw_res[0] = vfe_acquire.vfe_out.rsrc_node;
955 ife_out_res->is_dual_vfe = 0;
956 ife_out_res->res_id = vfe_out_res_id;
957 ife_out_res->res_type = (enum cam_ife_hw_mgr_res_type)
958 CAM_ISP_RESOURCE_VFE_OUT;
959 ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
Raja Mallikfe46d932019-02-12 20:34:07 +0530960 CAM_DBG(CAM_ISP, "IFE SRC num_children = %d",
961 ife_src_res->num_children);
Raja Mallikc7e256f2018-12-06 17:36:28 +0530962
963 return 0;
964err:
965 return rc;
966}
967
/*
 * Acquire VFE OUT resources for every non-RDI (pixel) output port listed
 * in @in_port, one per valid split of @ife_src_res. In dual-VFE mode the
 * left split is master and the right split slave. Each acquired out res
 * is linked as a child of the SRC res.
 *
 * Returns 0 on success, negative errno if any hw reserve fails (already
 * acquired resources are released by the caller's error path).
 */
static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
	struct cam_ife_hw_mgr_ctx *ife_ctx,
	struct cam_ife_hw_mgr_res *ife_src_res,
	struct cam_isp_in_port_info *in_port)
{
	int rc = -1;
	uint32_t i, j, k;
	struct cam_vfe_acquire_args vfe_acquire;
	struct cam_isp_out_port_info *out_port;
	struct cam_ife_hw_mgr_res *ife_out_res;
	struct cam_hw_intf *hw_intf;

	for (i = 0; i < in_port->num_out_res; i++) {
		out_port = &in_port->data[i];
		/* Low byte of the out res type indexes the out-res table */
		k = out_port->res_type & 0xFF;
		if (k >= CAM_IFE_HW_OUT_RES_MAX) {
			CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
				out_port->res_type);
			continue;
		}

		/* RDI outputs are handled by the rdi variant, skip here */
		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
			continue;

		CAM_DBG(CAM_ISP, "res_type 0x%x",
			out_port->res_type);

		ife_out_res = &ife_ctx->res_list_ife_out[k];
		ife_out_res->is_dual_vfe = in_port->usage_type;

		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
		vfe_acquire.vfe_out.ctx = ife_ctx;
		vfe_acquire.vfe_out.out_port_info = out_port;
		vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_vfe;
		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;

		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
			if (!ife_src_res->hw_res[j])
				continue;

			hw_intf = ife_src_res->hw_res[j]->hw_intf;

			if (j == CAM_ISP_HW_SPLIT_LEFT) {
				vfe_acquire.vfe_out.split_id =
					CAM_ISP_HW_SPLIT_LEFT;
				if (ife_src_res->is_dual_vfe) {
					/*TBD */
					/* Left split is master in dual mode */
					vfe_acquire.vfe_out.is_master = 1;
					vfe_acquire.vfe_out.dual_slave_core =
						(hw_intf->hw_idx == 0) ? 1 : 0;
				} else {
					vfe_acquire.vfe_out.is_master = 0;
					vfe_acquire.vfe_out.dual_slave_core =
						0;
				}
			} else {
				/* Right split is always the slave */
				vfe_acquire.vfe_out.split_id =
					CAM_ISP_HW_SPLIT_RIGHT;
				vfe_acquire.vfe_out.is_master = 0;
				vfe_acquire.vfe_out.dual_slave_core =
					(hw_intf->hw_idx == 0) ? 1 : 0;
			}

			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&vfe_acquire,
				sizeof(struct cam_vfe_acquire_args));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Can not acquire out resource 0x%x",
					out_port->res_type);
				goto err;
			}

			ife_out_res->hw_res[j] =
				vfe_acquire.vfe_out.rsrc_node;
			CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
				ife_out_res->hw_res[j]->res_type,
				ife_out_res->hw_res[j]->res_id);

		}
		ife_out_res->res_type =
			(enum cam_ife_hw_mgr_res_type)CAM_ISP_RESOURCE_VFE_OUT;
		ife_out_res->res_id = out_port->res_type;
		ife_out_res->parent = ife_src_res;
		ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
		CAM_DBG(CAM_ISP, "IFE SRC num_children = %d",
			ife_src_res->num_children);
	}

	return 0;
err:
	/* release resource at the entry function */
	return rc;
}
1064
1065static int cam_ife_hw_mgr_acquire_res_ife_out(
1066 struct cam_ife_hw_mgr_ctx *ife_ctx,
1067 struct cam_isp_in_port_info *in_port)
1068{
1069 int rc = -EINVAL;
1070 struct cam_ife_hw_mgr_res *ife_src_res;
1071
1072 list_for_each_entry(ife_src_res, &ife_ctx->res_list_ife_src, list) {
1073 if (ife_src_res->num_children)
1074 continue;
1075
1076 switch (ife_src_res->res_id) {
1077 case CAM_ISP_HW_VFE_IN_CAMIF:
1078 case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
Raja Mallikfe46d932019-02-12 20:34:07 +05301079 case CAM_ISP_HW_VFE_IN_RD:
Raja Mallikc7e256f2018-12-06 17:36:28 +05301080 rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
1081 ife_src_res, in_port);
1082 break;
1083 case CAM_ISP_HW_VFE_IN_RDI0:
1084 case CAM_ISP_HW_VFE_IN_RDI1:
1085 case CAM_ISP_HW_VFE_IN_RDI2:
1086 case CAM_ISP_HW_VFE_IN_RDI3:
1087 rc = cam_ife_hw_mgr_acquire_res_ife_out_rdi(ife_ctx,
1088 ife_src_res, in_port);
1089 break;
1090 default:
1091 CAM_ERR(CAM_ISP, "Unknown IFE SRC resource: %d",
1092 ife_src_res->res_id);
1093 break;
1094 }
1095 if (rc)
1096 goto err;
1097 }
1098
1099 return 0;
1100err:
1101 /* release resource on entry function */
1102 return rc;
1103}
1104
/*
 * cam_ife_hw_mgr_acquire_res_ife_rd_src() - acquire the VFE fetch (RD)
 * input resource for a fetch-engine context.
 *
 * Walks the context's CSID resource list and, for every RDI0 path found,
 * reserves a CAM_ISP_HW_VFE_IN_RD input on the VFE with the same hw index
 * as that CSID (master for dual-VFE, with the remaining split reserved as
 * slave). The new IFE source resource is linked as the CSID resource's
 * child.
 *
 * Returns 0 on success, negative value on failure. Note there is no
 * separate success return: the success path also falls through the err
 * label, which only logs and returns rc.
 */
static int cam_ife_hw_mgr_acquire_res_ife_rd_src(
	struct cam_ife_hw_mgr_ctx *ife_ctx,
	struct cam_isp_in_port_info *in_port)
{
	int rc = -1;
	struct cam_ife_hw_mgr_res *csid_res;
	struct cam_ife_hw_mgr_res *ife_src_res;
	struct cam_vfe_acquire_args vfe_acquire;
	struct cam_hw_intf *hw_intf;
	struct cam_ife_hw_mgr *ife_hw_mgr;
	int vfe_idx = -1, i = 0;

	ife_hw_mgr = ife_ctx->hw_mgr;

	CAM_DBG(CAM_ISP, "Enter");
	list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
		/* Fetch-engine source is only paired with the RDI0 path */
		if (csid_res->res_id != CAM_IFE_PIX_PATH_RES_RDI_0) {
			CAM_DBG(CAM_ISP, "not RDI0: %d", csid_res->res_id);
			continue;
		}

		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
			&ife_src_res);
		if (rc) {
			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
			goto err;
		}
		/* Move the manager resource onto the IFE source list */
		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
			&ife_src_res);

		CAM_DBG(CAM_ISP, "csid_res_id %d", csid_res->res_id);
		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN;
		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
		vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
		vfe_acquire.vfe_in.in_port = in_port;
		vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RD;
		vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;

		ife_src_res->res_type =
			(enum cam_ife_hw_mgr_res_type)vfe_acquire.rsrc_type;
		ife_src_res->res_id = vfe_acquire.vfe_in.res_id;
		ife_src_res->is_dual_vfe = csid_res->is_dual_vfe;

		/* Left split: VFE with the same hw index as the CSID */
		hw_intf =
			ife_hw_mgr->ife_devices[csid_res->hw_res[
			CAM_ISP_HW_SPLIT_LEFT]->hw_intf->hw_idx];

		vfe_idx = csid_res->hw_res[
			CAM_ISP_HW_SPLIT_LEFT]->hw_intf->hw_idx;

		/*
		 * fill in more acquire information as needed
		 */
		if (ife_src_res->is_dual_vfe)
			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_MASTER;

		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
			&vfe_acquire,
			sizeof(struct cam_vfe_acquire_args));
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Can not acquire IFE HW res %d",
				csid_res->res_id);
			goto err;
		}
		ife_src_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] =
			vfe_acquire.vfe_in.rsrc_node;
		CAM_DBG(CAM_ISP,
			"acquire success IFE:%d res type :0x%x res id:0x%x",
			hw_intf->hw_idx,
			ife_src_res->hw_res[CAM_ISP_HW_SPLIT_LEFT]->res_type,
			ife_src_res->hw_res[CAM_ISP_HW_SPLIT_LEFT]->res_id);

		/* Single VFE: skip the second-split acquisition below */
		if (!ife_src_res->is_dual_vfe)
			goto acq;

		/* Dual VFE: reserve the remaining split as slave */
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (i == CAM_ISP_HW_SPLIT_LEFT) {
				CAM_DBG(CAM_ISP, "vfe_idx %d is acquired",
					vfe_idx);
				continue;
			}

			/*
			 * NOTE(review): indexes ife_devices[] by split index
			 * i here, while the left split above indexed by
			 * hw_idx -- presumably relies on split index matching
			 * hw index; confirm against device enumeration.
			 */
			hw_intf = ife_hw_mgr->ife_devices[i];

			/* fill in more acquire information as needed */
			if (i == CAM_ISP_HW_SPLIT_RIGHT)
				vfe_acquire.vfe_in.sync_mode =
					CAM_ISP_HW_SYNC_SLAVE;

			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&vfe_acquire,
				sizeof(struct cam_vfe_acquire_args));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Can not acquire IFE HW res %d",
					csid_res->res_id);
				goto err;
			}
			ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
			CAM_DBG(CAM_ISP,
				"acquire success IFE:%d res type :0x%x res id:0x%x",
				hw_intf->hw_idx,
				ife_src_res->hw_res[i]->res_type,
				ife_src_res->hw_res[i]->res_id);
		}
acq:
		/*
		 * It should be one to one mapping between
		 * csid resource and ife source resource
		 */
		csid_res->child[0] = ife_src_res;
		ife_src_res->parent = csid_res;
		csid_res->child[csid_res->num_children++] = ife_src_res;
		CAM_DBG(CAM_ISP,
			"csid_res=%d CSID num_children=%d ife_src_res=%d",
			csid_res->res_id, csid_res->num_children,
			ife_src_res->res_id);
	}

err:
	/* release resource at the entry function */
	CAM_DBG(CAM_ISP, "Exit rc(0x%x)", rc);
	return rc;
}
1230
Raja Mallikc7e256f2018-12-06 17:36:28 +05301231static int cam_ife_hw_mgr_acquire_res_ife_src(
1232 struct cam_ife_hw_mgr_ctx *ife_ctx,
1233 struct cam_isp_in_port_info *in_port)
1234{
1235 int rc = -1;
1236 int i;
1237 struct cam_ife_hw_mgr_res *csid_res;
1238 struct cam_ife_hw_mgr_res *ife_src_res;
1239 struct cam_vfe_acquire_args vfe_acquire;
1240 struct cam_hw_intf *hw_intf;
1241 struct cam_ife_hw_mgr *ife_hw_mgr;
1242
1243 ife_hw_mgr = ife_ctx->hw_mgr;
1244
1245 list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
1246 if (csid_res->num_children)
1247 continue;
1248
1249 rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
1250 &ife_src_res);
1251 if (rc) {
1252 CAM_ERR(CAM_ISP, "No more free hw mgr resource");
1253 goto err;
1254 }
1255 cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
1256 &ife_src_res);
1257
1258 vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN;
1259 vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
1260 vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
1261 vfe_acquire.vfe_in.in_port = in_port;
1262
1263 switch (csid_res->res_id) {
1264 case CAM_IFE_PIX_PATH_RES_IPP:
1265 vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
1266 if (csid_res->is_dual_vfe)
1267 vfe_acquire.vfe_in.sync_mode =
1268 CAM_ISP_HW_SYNC_MASTER;
1269 else
1270 vfe_acquire.vfe_in.sync_mode =
1271 CAM_ISP_HW_SYNC_NONE;
1272
1273 break;
1274 case CAM_IFE_PIX_PATH_RES_PPP:
1275 vfe_acquire.vfe_in.res_id =
1276 CAM_ISP_HW_VFE_IN_CAMIF_LITE;
1277 vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
1278
1279 break;
1280 case CAM_IFE_PIX_PATH_RES_RDI_0:
1281 vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI0;
1282 vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
1283 break;
1284 case CAM_IFE_PIX_PATH_RES_RDI_1:
1285 vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI1;
1286 vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
1287 break;
1288 case CAM_IFE_PIX_PATH_RES_RDI_2:
1289 vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI2;
1290 vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
1291 break;
1292 case CAM_IFE_PIX_PATH_RES_RDI_3:
1293 vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI3;
1294 vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
1295 break;
1296 default:
1297 CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node");
1298 goto err;
1299 }
1300 ife_src_res->res_type =
1301 (enum cam_ife_hw_mgr_res_type)vfe_acquire.rsrc_type;
1302 ife_src_res->res_id = vfe_acquire.vfe_in.res_id;
1303 ife_src_res->is_dual_vfe = csid_res->is_dual_vfe;
1304
1305 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
1306 if (!csid_res->hw_res[i])
1307 continue;
1308
1309 hw_intf = ife_hw_mgr->ife_devices[
1310 csid_res->hw_res[i]->hw_intf->hw_idx];
1311
1312 /* fill in more acquire information as needed */
1313 /* slave Camif resource, */
1314 if (i == CAM_ISP_HW_SPLIT_RIGHT &&
1315 ife_src_res->is_dual_vfe)
1316 vfe_acquire.vfe_in.sync_mode =
1317 CAM_ISP_HW_SYNC_SLAVE;
1318
1319 rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
1320 &vfe_acquire,
1321 sizeof(struct cam_vfe_acquire_args));
1322 if (rc) {
1323 CAM_ERR(CAM_ISP,
1324 "Can not acquire IFE HW res %d",
1325 csid_res->res_id);
1326 goto err;
1327 }
1328 ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
1329 CAM_DBG(CAM_ISP,
1330 "acquire success IFE:%d res type :0x%x res id:0x%x",
1331 hw_intf->hw_idx,
1332 ife_src_res->hw_res[i]->res_type,
1333 ife_src_res->hw_res[i]->res_id);
1334
1335 }
1336
1337 /* It should be one to one mapping between
1338 * csid resource and ife source resource
1339 */
1340 csid_res->child[0] = ife_src_res;
1341 ife_src_res->parent = csid_res;
1342 csid_res->child[csid_res->num_children++] = ife_src_res;
Raja Mallikfe46d932019-02-12 20:34:07 +05301343 CAM_DBG(CAM_ISP,
1344 "csid_res=%d CSID num_children=%d ife_src_res=%d",
Raja Mallikc7e256f2018-12-06 17:36:28 +05301345 csid_res->res_id, csid_res->num_children,
1346 ife_src_res->res_id);
1347 }
1348
1349 return 0;
1350err:
1351 /* release resource at the entry function */
1352 return rc;
1353}
1354
/*
 * cam_ife_mgr_acquire_cid_res() - reserve a CSID CID (DT_ID) resource for
 * the given pixel/RDI path of an input port.
 *
 * First tries to share a CID from CSID hardware this context already
 * acquired; otherwise scans the CSID devices (ascending order for
 * fetch-engine contexts, descending otherwise). For dual-VFE IPP a second
 * (right) CID is reserved on a different CSID. On success *cid_res holds
 * the new manager resource, linked as a child of the context root
 * (res_list_ife_in).
 *
 * Returns 0 on success, negative value on failure; on the put_res path the
 * manager resource is returned to the free list.
 */
static int cam_ife_mgr_acquire_cid_res(
	struct cam_ife_hw_mgr_ctx *ife_ctx,
	struct cam_isp_in_port_info *in_port,
	struct cam_ife_hw_mgr_res **cid_res,
	enum cam_ife_pix_path_res_id path_res_id)
{
	int rc = -1;
	int i, j;
	struct cam_ife_hw_mgr *ife_hw_mgr;
	struct cam_hw_intf *hw_intf;
	struct cam_ife_hw_mgr_res *cid_res_temp, *cid_res_iterator;
	struct cam_csid_hw_reserve_resource_args csid_acquire;
	uint32_t acquired_cnt = 0;
	struct cam_isp_out_port_info *out_port = NULL;

	ife_hw_mgr = ife_ctx->hw_mgr;
	*cid_res = NULL;

	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, cid_res);
	if (rc) {
		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
		goto end;
	}

	cid_res_temp = *cid_res;

	/*
	 * NOTE(review): csid_acquire is not zeroed here (the RDI path
	 * variant memsets it); node_res is only tested after reserve()
	 * calls below -- presumably reserve() always writes it on
	 * success; confirm.
	 */
	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
	csid_acquire.in_port = in_port;
	csid_acquire.res_id = path_res_id;
	CAM_DBG(CAM_ISP, "path_res_id %d", path_res_id);

	if (in_port->num_out_res)
		out_port = &(in_port->data[0]);

	/* Try acquiring CID resource from previously acquired HW */
	list_for_each_entry(cid_res_iterator, &ife_ctx->res_list_ife_cid,
		list) {

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!cid_res_iterator->hw_res[i])
				continue;

			/* Never share a CID across secure/non-secure use */
			if (cid_res_iterator->is_secure == 1 ||
				(cid_res_iterator->is_secure == 0 &&
				in_port->num_out_res &&
				out_port->secure_mode == 1))
				continue;

			hw_intf = cid_res_iterator->hw_res[i]->hw_intf;
			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&csid_acquire, sizeof(csid_acquire));
			if (rc) {
				CAM_DBG(CAM_ISP,
					"No ife cid resource from hw %d",
					hw_intf->hw_idx);
				continue;
			}

			cid_res_temp->hw_res[acquired_cnt++] =
				csid_acquire.node_res;

			CAM_DBG(CAM_ISP,
				"acquired from old csid(%s)=%d CID rsrc successfully",
				(i == 0) ? "left" : "right",
				hw_intf->hw_idx);

			/* Dual IFE IPP also needs the right CID; loop on */
			if (in_port->usage_type && acquired_cnt == 1 &&
				path_res_id == CAM_IFE_PIX_PATH_RES_IPP)
				/*
				 * Continue to acquire Right for IPP.
				 * Dual IFE for RDI and PPP is not currently
				 * supported.
				 */

				continue;

			if (acquired_cnt)
				/*
				 * If successfully acquired CID from
				 * previously acquired HW, skip the next
				 * part
				 */
				goto acquire_successful;
		}
	}

	/* Acquire Left if not already acquired */
	/* FE contexts scan CSIDs low-to-high, others high-to-low */
	if (ife_ctx->is_fe_enable) {
		for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
			if (!ife_hw_mgr->csid_devices[i])
				continue;

			hw_intf = ife_hw_mgr->csid_devices[i];
			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&csid_acquire, sizeof(csid_acquire));
			if (rc)
				continue;
			else {
				cid_res_temp->hw_res[acquired_cnt++] =
					csid_acquire.node_res;
				break;
			}
		}
		if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
			CAM_ERR(CAM_ISP,
				"Can not acquire ife cid resource for path %d",
				path_res_id);
			goto put_res;
		}
	} else {
		for (i = CAM_IFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) {
			if (!ife_hw_mgr->csid_devices[i])
				continue;

			hw_intf = ife_hw_mgr->csid_devices[i];
			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&csid_acquire, sizeof(csid_acquire));
			if (rc)
				continue;
			else {
				cid_res_temp->hw_res[acquired_cnt++] =
					csid_acquire.node_res;
				break;
			}
		}
		if (i == -1 || !csid_acquire.node_res) {
			CAM_ERR(CAM_ISP,
				"Can not acquire ife cid resource for path %d",
				path_res_id);
			goto put_res;
		}
	}


acquire_successful:
	CAM_DBG(CAM_ISP, "CID left acquired success is_dual %d",
		in_port->usage_type);

	cid_res_temp->res_type = CAM_IFE_HW_MGR_RES_CID;
	/* CID(DT_ID) value of acquire device, require for path */
	cid_res_temp->res_id = csid_acquire.node_res->res_id;
	cid_res_temp->is_dual_vfe = in_port->usage_type;

	if (in_port->num_out_res)
		cid_res_temp->is_secure = out_port->secure_mode;

	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, cid_res);

	/*
	 * Acquire Right if not already acquired.
	 * Dual IFE for RDI and PPP is not currently supported.
	 */
	if (cid_res_temp->is_dual_vfe && path_res_id
		== CAM_IFE_PIX_PATH_RES_IPP && acquired_cnt == 1) {
		csid_acquire.node_res = NULL;
		csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
		csid_acquire.in_port = in_port;
		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
			if (!ife_hw_mgr->csid_devices[j])
				continue;

			/* Right CID must come from a different CSID hw */
			if (j == cid_res_temp->hw_res[0]->hw_intf->hw_idx)
				continue;

			hw_intf = ife_hw_mgr->csid_devices[j];
			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&csid_acquire, sizeof(csid_acquire));
			if (rc)
				continue;
			else
				break;
		}

		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
			CAM_ERR(CAM_ISP,
				"Can not acquire ife csid rdi resource");
			goto end;
		}
		cid_res_temp->hw_res[1] = csid_acquire.node_res;
		CAM_DBG(CAM_ISP, "CID right acquired success is_dual %d",
			in_port->usage_type);
	}
	cid_res_temp->parent = &ife_ctx->res_list_ife_in;
	ife_ctx->res_list_ife_in.child[
		ife_ctx->res_list_ife_in.num_children++] = cid_res_temp;
	CAM_DBG(CAM_ISP, "IFE IN num_children = %d",
		ife_ctx->res_list_ife_in.num_children);

	return 0;
put_res:
	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, cid_res);
end:
	return rc;

}
1550
1551static int cam_ife_hw_mgr_acquire_res_ife_csid_pxl(
1552 struct cam_ife_hw_mgr_ctx *ife_ctx,
1553 struct cam_isp_in_port_info *in_port,
1554 bool is_ipp)
1555{
1556 int rc = -1;
1557 int i;
1558 int master_idx = -1;
1559
1560 struct cam_ife_hw_mgr *ife_hw_mgr;
1561 struct cam_ife_hw_mgr_res *csid_res;
1562 struct cam_ife_hw_mgr_res *cid_res;
1563 struct cam_hw_intf *hw_intf;
1564 struct cam_csid_hw_reserve_resource_args csid_acquire;
1565 enum cam_ife_pix_path_res_id path_res_id;
1566
1567 ife_hw_mgr = ife_ctx->hw_mgr;
1568 /* get cid resource */
1569 if (is_ipp)
1570 path_res_id = CAM_IFE_PIX_PATH_RES_IPP;
1571 else
1572 path_res_id = CAM_IFE_PIX_PATH_RES_PPP;
1573
1574 rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
1575 path_res_id);
1576
1577 if (rc) {
1578 CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
1579 goto end;
1580 }
1581
1582 rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
1583 if (rc) {
1584 CAM_ERR(CAM_ISP, "No more free hw mgr resource");
1585 goto end;
1586 }
1587
1588 csid_res->res_type =
1589 (enum cam_ife_hw_mgr_res_type)CAM_ISP_RESOURCE_PIX_PATH;
1590
1591 csid_res->res_id = path_res_id;
1592
1593 if (in_port->usage_type && is_ipp)
1594 csid_res->is_dual_vfe = 1;
1595 else {
1596 csid_res->is_dual_vfe = 0;
1597 csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
1598 }
1599
Raja Mallikfe46d932019-02-12 20:34:07 +05301600 CAM_DBG(CAM_ISP, "CSID Acq: E");
Raja Mallikc7e256f2018-12-06 17:36:28 +05301601 /* IPP resource needs to be from same HW as CID resource */
1602 for (i = 0; i <= csid_res->is_dual_vfe; i++) {
1603 CAM_DBG(CAM_ISP, "i %d is_dual %d", i, csid_res->is_dual_vfe);
1604
1605 csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
1606 csid_acquire.res_id = path_res_id;
1607 csid_acquire.cid = cid_res->hw_res[i]->res_id;
1608 csid_acquire.in_port = in_port;
1609 csid_acquire.out_port = in_port->data;
1610 csid_acquire.node_res = NULL;
1611
1612 hw_intf = cid_res->hw_res[i]->hw_intf;
1613
1614 if (csid_res->is_dual_vfe) {
1615 if (i == CAM_ISP_HW_SPLIT_LEFT) {
1616 master_idx = hw_intf->hw_idx;
1617 csid_acquire.sync_mode =
1618 CAM_ISP_HW_SYNC_MASTER;
1619 } else {
1620 if (master_idx == -1) {
1621 CAM_ERR(CAM_ISP,
1622 "No Master found");
1623 goto put_res;
1624 }
1625 csid_acquire.sync_mode =
1626 CAM_ISP_HW_SYNC_SLAVE;
1627 csid_acquire.master_idx = master_idx;
1628 }
1629 }
1630
1631 rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
1632 &csid_acquire, sizeof(csid_acquire));
1633 if (rc) {
1634 CAM_ERR(CAM_ISP,
Raja Mallike3ed1a32019-08-22 17:12:32 +05301635 "Cannot acquire ife csid pxl path rsrc %s, hw=%d rc=%d",
1636 (is_ipp) ? "IPP" : "PPP",
1637 hw_intf->hw_idx, rc);
Raja Mallikc7e256f2018-12-06 17:36:28 +05301638 goto put_res;
1639 }
1640
1641 csid_res->hw_res[i] = csid_acquire.node_res;
1642 CAM_DBG(CAM_ISP,
1643 "acquired csid(%s)=%d pxl path rsrc %s successfully",
1644 (i == 0) ? "left" : "right", hw_intf->hw_idx,
1645 (is_ipp) ? "IPP" : "PPP");
1646 }
1647 cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
1648
1649 csid_res->parent = cid_res;
1650 cid_res->child[cid_res->num_children++] = csid_res;
1651
Raja Mallikfe46d932019-02-12 20:34:07 +05301652 CAM_DBG(CAM_ISP, "acquire res %d CID children = %d",
1653 csid_acquire.res_id, cid_res->num_children);
Raja Mallikc7e256f2018-12-06 17:36:28 +05301654 return 0;
1655put_res:
1656 cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
1657end:
1658 return rc;
1659}
1660
1661static enum cam_ife_pix_path_res_id
1662 cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
1663 uint32_t out_port_type)
1664{
1665 enum cam_ife_pix_path_res_id path_id;
Raja Mallikfe46d932019-02-12 20:34:07 +05301666 CAM_DBG(CAM_ISP, "out_port_type %x", out_port_type);
Raja Mallikc7e256f2018-12-06 17:36:28 +05301667
1668 switch (out_port_type) {
1669 case CAM_ISP_IFE_OUT_RES_RDI_0:
1670 path_id = CAM_IFE_PIX_PATH_RES_RDI_0;
1671 break;
1672 case CAM_ISP_IFE_OUT_RES_RDI_1:
1673 path_id = CAM_IFE_PIX_PATH_RES_RDI_1;
1674 break;
1675 case CAM_ISP_IFE_OUT_RES_RDI_2:
1676 path_id = CAM_IFE_PIX_PATH_RES_RDI_2;
1677 break;
1678 case CAM_ISP_IFE_OUT_RES_RDI_3:
1679 path_id = CAM_IFE_PIX_PATH_RES_RDI_3;
1680 break;
1681 default:
1682 path_id = CAM_IFE_PIX_PATH_RES_MAX;
1683 CAM_DBG(CAM_ISP, "maximum rdi output type exceeded");
1684 break;
1685 }
1686
Raja Mallikfe46d932019-02-12 20:34:07 +05301687 CAM_DBG(CAM_ISP, "out_port %x path_id %d", out_port_type, path_id);
Raja Mallikc7e256f2018-12-06 17:36:28 +05301688
1689 return path_id;
1690}
1691
/*
 * cam_ife_hw_mgr_acquire_res_ife_csid_rdi() - acquire a CSID RDI path
 * (CID + PATH pair) for every RDI output port requested by in_port.
 *
 * Output ports that do not map to an RDI path are skipped. Each acquired
 * path resource is linked as a child of its CID resource and moved onto
 * the context's CSID resource list.
 *
 * Returns 0 on success, negative value on failure; the in-flight manager
 * resource is returned to the free list on the put_res path.
 */
static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
	struct cam_ife_hw_mgr_ctx *ife_ctx,
	struct cam_isp_in_port_info *in_port)
{
	int rc = -EINVAL;
	int i;

	struct cam_ife_hw_mgr *ife_hw_mgr;
	struct cam_ife_hw_mgr_res *csid_res;
	struct cam_ife_hw_mgr_res *cid_res;
	struct cam_hw_intf *hw_intf;
	struct cam_isp_out_port_info *out_port;
	struct cam_csid_hw_reserve_resource_args csid_acquire;
	enum cam_ife_pix_path_res_id path_res_id;

	ife_hw_mgr = ife_ctx->hw_mgr;

	for (i = 0; i < in_port->num_out_res; i++) {
		out_port = &in_port->data[i];
		path_res_id = cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
			out_port->res_type);
		/* skip non-RDI output ports */
		if (path_res_id == CAM_IFE_PIX_PATH_RES_MAX)
			continue;

		/* get cid resource */
		rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
			path_res_id);
		if (rc) {
			CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
			goto end;
		}

		/* For each RDI we need CID + PATH resource */
		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
			&csid_res);
		if (rc) {
			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
			goto end;
		}

		/* Zero acquire args so reserve() sees no stale fields */
		memset(&csid_acquire, 0, sizeof(csid_acquire));
		csid_acquire.res_id = path_res_id;
		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
		csid_acquire.cid = cid_res->hw_res[0]->res_id;
		csid_acquire.in_port = in_port;
		csid_acquire.out_port = out_port;
		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
		csid_acquire.node_res = NULL;

		/* RDI path must live on the same CSID hw as its CID */
		hw_intf = cid_res->hw_res[0]->hw_intf;
		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
			&csid_acquire, sizeof(csid_acquire));
		if (rc) {
			CAM_ERR(CAM_ISP,
				"CSID Path reserve failed hw=%d rc=%d cid=%d",
				hw_intf->hw_idx, rc,
				cid_res->hw_res[0]->res_id);
			goto put_res;
		}

		if (csid_acquire.node_res == NULL) {
			CAM_ERR(CAM_ISP, "Acquire CSID RDI rsrc failed");

			goto put_res;
		}

		csid_res->res_type = (enum cam_ife_hw_mgr_res_type)
			CAM_ISP_RESOURCE_PIX_PATH;
		csid_res->res_id = csid_acquire.res_id;
		csid_res->is_dual_vfe = 0;
		csid_res->hw_res[0] = csid_acquire.node_res;
		csid_res->hw_res[1] = NULL;
		csid_res->parent = cid_res;
		cid_res->child[cid_res->num_children++] =
			csid_res;
		CAM_DBG(CAM_ISP, "acquire res %d CID children = %d",
			csid_acquire.res_id, cid_res->num_children);
		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);

	}

	return 0;
put_res:
	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
end:
	return rc;
}
1779
1780static int cam_ife_hw_mgr_acquire_res_root(
1781 struct cam_ife_hw_mgr_ctx *ife_ctx,
1782 struct cam_isp_in_port_info *in_port)
1783{
1784 int rc = -1;
1785
1786 if (ife_ctx->res_list_ife_in.res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
1787 /* first acquire */
1788 ife_ctx->res_list_ife_in.res_type = CAM_IFE_HW_MGR_RES_ROOT;
1789 ife_ctx->res_list_ife_in.res_id = in_port->res_type;
1790 ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
Raja Mallikfe46d932019-02-12 20:34:07 +05301791 } else if ((ife_ctx->res_list_ife_in.res_id !=
1792 in_port->res_type) && (!ife_ctx->is_fe_enable)) {
Raja Mallikc7e256f2018-12-06 17:36:28 +05301793 CAM_ERR(CAM_ISP, "No Free resource for this context");
1794 goto err;
1795 } else {
1796 /* else do nothing */
1797 }
1798 return 0;
1799err:
1800 /* release resource in entry function */
1801 return rc;
1802}
1803
/*
 * cam_ife_mgr_check_and_update_fe() - scan the acquire payload and set
 * ife_ctx->is_fe_enable when any input uses the fetch-engine (RD) path.
 *
 * Walks the packed, variable-length array of cam_isp_in_port_info records
 * carried in acquire_hw_info, validating each record's output-resource
 * count and the cumulative size against input_info_size before reading
 * past it. Stops at the first input whose res_type is
 * CAM_ISP_IFE_IN_RES_RD.
 *
 * Returns 0 on success, -EINVAL when the payload is malformed.
 */
static int cam_ife_mgr_check_and_update_fe(
	struct cam_ife_hw_mgr_ctx *ife_ctx,
	struct cam_isp_acquire_hw_info *acquire_hw_info)
{
	int i;
	struct cam_isp_in_port_info *in_port = NULL;
	uint32_t in_port_length = 0;
	uint32_t total_in_port_length = 0;

	/* first in-port record sits at input_info_offset into the blob */
	in_port = (struct cam_isp_in_port_info *)
		((uint8_t *)&acquire_hw_info->data +
		acquire_hw_info->input_info_offset);
	for (i = 0; i < acquire_hw_info->num_inputs; i++) {

		/* validate the count before sizing the record from it */
		if ((in_port->num_out_res > CAM_IFE_HW_OUT_RES_MAX) ||
			(in_port->num_out_res <= 0)) {
			CAM_ERR(CAM_ISP, "Invalid num output res %u",
				in_port->num_out_res);
			return -EINVAL;
		}

		/* record size: the struct embeds one out-port already */
		in_port_length = sizeof(struct cam_isp_in_port_info) +
			(in_port->num_out_res - 1) *
			sizeof(struct cam_isp_out_port_info);
		total_in_port_length += in_port_length;

		if (total_in_port_length > acquire_hw_info->input_info_size) {
			CAM_ERR(CAM_ISP, "buffer size is not enough");
			return -EINVAL;
		}
		CAM_DBG(CAM_ISP, "in_port%d res_type %d", i,
			in_port->res_type);
		if (in_port->res_type == CAM_ISP_IFE_IN_RES_RD) {
			ife_ctx->is_fe_enable = true;
			break;
		}

		/* advance to the next variable-length record */
		in_port = (struct cam_isp_in_port_info *)((uint8_t *)in_port +
			in_port_length);
	}
	CAM_DBG(CAM_ISP, "is_fe_enable %d", ife_ctx->is_fe_enable);

	return 0;
}
1848
1849static int cam_ife_hw_mgr_preprocess_port(
Raja Mallikc7e256f2018-12-06 17:36:28 +05301850 struct cam_ife_hw_mgr_ctx *ife_ctx,
1851 struct cam_isp_in_port_info *in_port,
1852 int *ipp_count,
1853 int *rdi_count,
Raja Mallikfe46d932019-02-12 20:34:07 +05301854 int *ppp_count,
1855 int *ife_rd_count)
Raja Mallikc7e256f2018-12-06 17:36:28 +05301856{
1857 int ipp_num = 0;
1858 int rdi_num = 0;
1859 int ppp_num = 0;
Raja Mallikfe46d932019-02-12 20:34:07 +05301860 int ife_rd_num = 0;
Raja Mallikc7e256f2018-12-06 17:36:28 +05301861 uint32_t i;
1862 struct cam_isp_out_port_info *out_port;
1863 struct cam_ife_hw_mgr *ife_hw_mgr;
1864
1865 ife_hw_mgr = ife_ctx->hw_mgr;
1866
Raja Mallikfe46d932019-02-12 20:34:07 +05301867 if (in_port->res_type == CAM_ISP_IFE_IN_RES_RD) {
1868 ife_rd_num++;
1869 } else {
1870 for (i = 0; i < in_port->num_out_res; i++) {
1871 out_port = &in_port->data[i];
1872 if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
1873 rdi_num++;
1874 else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_2PD)
1875 ppp_num++;
1876 else {
1877 CAM_DBG(CAM_ISP, "out_res_type %d",
1878 out_port->res_type);
1879 ipp_num++;
1880 }
1881 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05301882 }
1883
1884 *ipp_count = ipp_num;
1885 *rdi_count = rdi_num;
1886 *ppp_count = ppp_num;
Raja Mallikfe46d932019-02-12 20:34:07 +05301887 *ife_rd_count = ife_rd_num;
1888
1889 CAM_DBG(CAM_ISP, "rdi: %d ipp: %d ppp: %d ife_rd: %d",
1890 rdi_num, ipp_num, ppp_num, ife_rd_num);
Raja Mallikc7e256f2018-12-06 17:36:28 +05301891
1892 return 0;
1893}
1894
1895static int cam_ife_mgr_acquire_hw_for_ctx(
1896 struct cam_ife_hw_mgr_ctx *ife_ctx,
1897 struct cam_isp_in_port_info *in_port,
1898 uint32_t *num_pix_port, uint32_t *num_rdi_port)
1899{
1900 int rc = -1;
1901 int is_dual_vfe = 0;
1902 int ipp_count = 0;
1903 int rdi_count = 0;
1904 int ppp_count = 0;
Raja Mallikfe46d932019-02-12 20:34:07 +05301905 int ife_rd_count = 0;
Raja Mallikc7e256f2018-12-06 17:36:28 +05301906
1907 is_dual_vfe = in_port->usage_type;
1908
1909 /* get root node resource */
1910 rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
1911 if (rc) {
1912 CAM_ERR(CAM_ISP, "Can not acquire csid rx resource");
1913 goto err;
1914 }
1915
Raja Mallikfe46d932019-02-12 20:34:07 +05301916 cam_ife_hw_mgr_preprocess_port(ife_ctx, in_port,
1917 &ipp_count, &rdi_count, &ppp_count, &ife_rd_count);
Raja Mallikc7e256f2018-12-06 17:36:28 +05301918
Raja Mallikfe46d932019-02-12 20:34:07 +05301919 if (!ipp_count && !rdi_count && !ppp_count && !ife_rd_count) {
1920 CAM_ERR(CAM_ISP, "No PIX or RDI or PPP or IFE RD resource");
Raja Mallikc7e256f2018-12-06 17:36:28 +05301921 return -EINVAL;
1922 }
1923
1924 if (ipp_count) {
1925 /* get ife csid IPP resource */
1926 rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
1927 in_port, true);
1928 if (rc) {
1929 CAM_ERR(CAM_ISP,
1930 "Acquire IFE CSID IPP resource Failed");
1931 goto err;
1932 }
1933 }
1934
1935 if (rdi_count) {
1936 /* get ife csid rdi resource */
1937 rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port);
1938 if (rc) {
1939 CAM_ERR(CAM_ISP,
1940 "Acquire IFE CSID RDI resource Failed");
1941 goto err;
1942 }
1943 }
1944
1945 if (ppp_count) {
1946 /* get ife csid PPP resource */
1947 rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
1948 in_port, false);
1949 if (rc) {
1950 CAM_ERR(CAM_ISP,
1951 "Acquire IFE CSID PPP resource Failed");
1952 goto err;
1953 }
1954 }
1955
1956
1957 /* get ife src resource */
Raja Mallikfe46d932019-02-12 20:34:07 +05301958 if (ife_rd_count) {
1959 rc = cam_ife_hw_mgr_acquire_res_ife_rd_src(ife_ctx, in_port);
1960 rc = cam_ife_hw_mgr_acquire_res_bus_rd(ife_ctx, in_port);
1961 } else {
1962 rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
1963 }
1964
Raja Mallikc7e256f2018-12-06 17:36:28 +05301965 if (rc) {
1966 CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
1967 goto err;
1968 }
1969
Raja Mallikfe46d932019-02-12 20:34:07 +05301970 CAM_DBG(CAM_ISP, "Acquiring IFE OUT resource...");
Raja Mallikc7e256f2018-12-06 17:36:28 +05301971 rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
1972 if (rc) {
1973 CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed");
1974 goto err;
1975 }
1976
Raja Mallikfe46d932019-02-12 20:34:07 +05301977 *num_pix_port += ipp_count + ppp_count + ife_rd_count;
Raja Mallikc7e256f2018-12-06 17:36:28 +05301978 *num_rdi_port += rdi_count;
1979
1980 return 0;
1981err:
1982 /* release resource at the acquire entry funciton */
1983 return rc;
1984}
1985
1986void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
1987 enum cam_cdm_cb_status status, uint64_t cookie)
1988{
1989 struct cam_ife_hw_mgr_ctx *ctx = NULL;
1990
1991 if (!userdata) {
1992 CAM_ERR(CAM_ISP, "Invalid args");
1993 return;
1994 }
1995
1996 ctx = userdata;
1997
1998 if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
1999 complete(&ctx->config_done_complete);
2000 CAM_DBG(CAM_ISP,
Raja Mallikfe46d932019-02-12 20:34:07 +05302001 "Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu ctx_index=%d",
2002 handle, userdata, status, cookie, ctx->ctx_index);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302003 } else {
2004 CAM_WARN(CAM_ISP,
2005 "Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
2006 handle, userdata, status, cookie);
2007 }
2008}
2009
2010/* entry function: acquire_hw */
2011static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
2012{
2013 struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv;
2014 struct cam_hw_acquire_args *acquire_args = acquire_hw_args;
2015 int rc = -1;
2016 int i, j;
2017 struct cam_ife_hw_mgr_ctx *ife_ctx;
2018 struct cam_isp_in_port_info *in_port = NULL;
2019 struct cam_cdm_acquire_data cdm_acquire;
2020 uint32_t num_pix_port_per_in = 0;
2021 uint32_t num_rdi_port_per_in = 0;
2022 uint32_t total_pix_port = 0;
2023 uint32_t total_rdi_port = 0;
2024 uint32_t in_port_length = 0;
2025 uint32_t total_in_port_length = 0;
2026 struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;
2027
2028 CAM_DBG(CAM_ISP, "Enter...");
2029
2030 if (!acquire_args || acquire_args->num_acq <= 0) {
2031 CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
2032 return -EINVAL;
2033 }
2034
2035 /* get the ife ctx */
2036 rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
2037 if (rc || !ife_ctx) {
2038 CAM_ERR(CAM_ISP, "Get ife hw context failed");
2039 goto err;
2040 }
2041
2042 ife_ctx->common.cb_priv = acquire_args->context_data;
2043 for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
2044 ife_ctx->common.event_cb[i] = acquire_args->event_cb;
2045
2046 ife_ctx->hw_mgr = ife_hw_mgr;
2047
2048
2049 memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
2050 cdm_acquire.cell_index = 0;
2051 cdm_acquire.handle = 0;
2052 cdm_acquire.userdata = ife_ctx;
2053 cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
2054 for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
2055 if (ife_hw_mgr->cdm_reg_map[i])
2056 cdm_acquire.base_array[j++] =
2057 ife_hw_mgr->cdm_reg_map[i];
2058 }
2059 cdm_acquire.base_array_cnt = j;
2060
Raja Mallikc7e256f2018-12-06 17:36:28 +05302061 cdm_acquire.id = CAM_CDM_VIRTUAL;
2062 cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
2063 rc = cam_cdm_acquire(&cdm_acquire);
2064 if (rc) {
2065 CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
2066 goto free_ctx;
2067 }
2068
2069 CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
2070 cdm_acquire.handle);
2071 ife_ctx->cdm_handle = cdm_acquire.handle;
2072 ife_ctx->cdm_ops = cdm_acquire.ops;
2073
2074 acquire_hw_info =
2075 (struct cam_isp_acquire_hw_info *)acquire_args->acquire_info;
Raja Mallikc7e256f2018-12-06 17:36:28 +05302076
Raja Mallikfe46d932019-02-12 20:34:07 +05302077 rc = cam_ife_mgr_check_and_update_fe(ife_ctx, acquire_hw_info);
2078 if (rc) {
2079 CAM_ERR(CAM_ISP, "buffer size is not enough");
Raja Mallik9e335332019-04-04 14:48:25 +05302080 goto free_cdm;
Raja Mallikfe46d932019-02-12 20:34:07 +05302081 }
2082
Raja Mallik9e335332019-04-04 14:48:25 +05302083 in_port = (struct cam_isp_in_port_info *)
2084 ((uint8_t *)&acquire_hw_info->data +
2085 acquire_hw_info->input_info_offset);
2086
Raja Mallikc7e256f2018-12-06 17:36:28 +05302087 /* acquire HW resources */
2088 for (i = 0; i < acquire_hw_info->num_inputs; i++) {
Raja Mallikff6c75b2019-01-29 16:52:37 +05302089
Raja Mallik9e335332019-04-04 14:48:25 +05302090 if ((in_port->num_out_res > CAM_IFE_HW_OUT_RES_MAX) ||
2091 (in_port->num_out_res <= 0)) {
2092 CAM_ERR(CAM_ISP, "Invalid num output res %u",
Raja Mallikff6c75b2019-01-29 16:52:37 +05302093 in_port->num_out_res);
2094 rc = -EINVAL;
2095 goto free_res;
2096 }
2097
Raja Mallikc7e256f2018-12-06 17:36:28 +05302098 in_port_length = sizeof(struct cam_isp_in_port_info) +
2099 (in_port->num_out_res - 1) *
2100 sizeof(struct cam_isp_out_port_info);
2101 total_in_port_length += in_port_length;
2102
2103 if (total_in_port_length > acquire_hw_info->input_info_size) {
2104 CAM_ERR(CAM_ISP, "buffer size is not enough");
2105 rc = -EINVAL;
2106 goto free_res;
2107 }
Raja Mallikfe46d932019-02-12 20:34:07 +05302108 CAM_DBG(CAM_ISP, "in_res_type %x", in_port->res_type);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302109 rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
2110 &num_pix_port_per_in, &num_rdi_port_per_in);
2111 total_pix_port += num_pix_port_per_in;
2112 total_rdi_port += num_rdi_port_per_in;
2113
2114 if (rc) {
2115 CAM_ERR(CAM_ISP, "can not acquire resource");
2116 goto free_res;
2117 }
2118 in_port = (struct cam_isp_in_port_info *)((uint8_t *)in_port +
2119 in_port_length);
2120 }
2121
2122 /* Check whether context has only RDI resource */
2123 if (!total_pix_port) {
2124 ife_ctx->is_rdi_only_context = 1;
2125 CAM_DBG(CAM_ISP, "RDI only context");
2126 }
2127
2128 /* Process base info */
2129 rc = cam_ife_mgr_process_base_info(ife_ctx);
2130 if (rc) {
2131 CAM_ERR(CAM_ISP, "Process base info failed");
2132 goto free_res;
2133 }
2134
2135 acquire_args->ctxt_to_hw_map = ife_ctx;
2136 ife_ctx->ctx_in_use = 1;
2137
2138 cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
2139
2140 CAM_DBG(CAM_ISP, "Exit...(success)");
2141
2142 return 0;
2143free_res:
Raja Mallike3ed1a32019-08-22 17:12:32 +05302144 /*Dump all the current acquired resources */
2145 cam_ife_hw_mgr_dump_all_ctx(ife_ctx);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302146 cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
Raja Mallik9e335332019-04-04 14:48:25 +05302147free_cdm:
Raja Mallikc7e256f2018-12-06 17:36:28 +05302148 cam_cdm_release(ife_ctx->cdm_handle);
2149free_ctx:
2150 cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
2151err:
2152 CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
2153 return rc;
2154}
2155
/*
 * Entry function: acquire_dev (legacy per-resource acquire path).
 *
 * Acquires an IFE context, a virtual CDM handle, and then walks the
 * caller-supplied array of cam_isp_resource entries, copying each
 * in-port descriptor from user space and acquiring HW for it.
 *
 * hw_mgr_priv      - struct cam_ife_hw_mgr * (hw manager singleton)
 * acquire_hw_args  - struct cam_hw_acquire_args * from the ISP context
 *
 * Returns 0 on success; on failure releases everything acquired so far
 * via the goto-cleanup chain and returns a negative errno.
 */
static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
{
	struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv;
	struct cam_hw_acquire_args *acquire_args = acquire_hw_args;
	int rc = -1;
	int i, j;
	struct cam_ife_hw_mgr_ctx *ife_ctx;
	struct cam_isp_in_port_info *in_port = NULL;
	struct cam_isp_resource *isp_resource = NULL;
	struct cam_cdm_acquire_data cdm_acquire;
	uint32_t num_pix_port_per_in = 0;
	uint32_t num_rdi_port_per_in = 0;
	uint32_t total_pix_port = 0;
	uint32_t total_rdi_port = 0;
	uint32_t in_port_length = 0;

	CAM_DBG(CAM_ISP, "Enter...");

	if (!acquire_args || acquire_args->num_acq <= 0) {
		CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
		return -EINVAL;
	}

	/* get the ife ctx */
	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
	if (rc || !ife_ctx) {
		CAM_ERR(CAM_ISP, "Get ife hw context failed");
		goto err;
	}

	/* Same event callback is registered for every ISP HW event type */
	ife_ctx->common.cb_priv = acquire_args->context_data;
	for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
		ife_ctx->common.event_cb[i] = acquire_args->event_cb;

	ife_ctx->hw_mgr = ife_hw_mgr;


	/*
	 * Build the CDM acquire request: collect the register base of every
	 * populated IFE device so the CDM can patch command buffers for it.
	 */
	memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
	cdm_acquire.cell_index = 0;
	cdm_acquire.handle = 0;
	cdm_acquire.userdata = ife_ctx;
	cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
		if (ife_hw_mgr->cdm_reg_map[i])
			cdm_acquire.base_array[j++] =
				ife_hw_mgr->cdm_reg_map[i];
	}
	/* Only j slots were filled; shrink the count to match */
	cdm_acquire.base_array_cnt = j;


	cdm_acquire.id = CAM_CDM_VIRTUAL;
	cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
	rc = cam_cdm_acquire(&cdm_acquire);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
		goto free_ctx;
	}

	CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
		cdm_acquire.handle);
	ife_ctx->cdm_handle = cdm_acquire.handle;
	ife_ctx->cdm_ops = cdm_acquire.ops;

	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;

	/* acquire HW resources */
	for (i = 0; i < acquire_args->num_acq; i++) {
		if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
			continue;

		CAM_DBG(CAM_ISP, "acquire no = %d total = %d", i,
			acquire_args->num_acq);
		CAM_DBG(CAM_ISP,
			"start copy from user handle %lld with len = %d",
			isp_resource[i].res_hdl,
			isp_resource[i].length);

		/*
		 * First validate against the fixed-size header only; the
		 * full length (header + out ports) is re-checked below once
		 * num_out_res is known.
		 */
		in_port_length = sizeof(struct cam_isp_in_port_info);

		if (in_port_length > isp_resource[i].length) {
			CAM_ERR(CAM_ISP, "buffer size is not enough");
			rc = -EINVAL;
			goto free_res;
		}

		/* memdup_user returns an ERR_PTR on failure, never NULL */
		in_port = memdup_user(
			u64_to_user_ptr(isp_resource[i].res_hdl),
			isp_resource[i].length);
		if (!IS_ERR(in_port)) {
			if (in_port->num_out_res > CAM_IFE_HW_OUT_RES_MAX) {
				CAM_ERR(CAM_ISP, "too many output res %d",
					in_port->num_out_res);
				rc = -EINVAL;
				kfree(in_port);
				goto free_res;
			}

			/*
			 * struct cam_isp_in_port_info already embeds one
			 * cam_isp_out_port_info, hence (num_out_res - 1).
			 */
			in_port_length = sizeof(struct cam_isp_in_port_info) +
				(in_port->num_out_res - 1) *
				sizeof(struct cam_isp_out_port_info);
			if (in_port_length > isp_resource[i].length) {
				CAM_ERR(CAM_ISP, "buffer size is not enough");
				rc = -EINVAL;
				kfree(in_port);
				goto free_res;
			}

			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
				&num_pix_port_per_in, &num_rdi_port_per_in);
			total_pix_port += num_pix_port_per_in;
			total_rdi_port += num_rdi_port_per_in;

			/* Kernel copy no longer needed once HW is acquired */
			kfree(in_port);
			if (rc) {
				CAM_ERR(CAM_ISP, "can not acquire resource");
				goto free_res;
			}
		} else {
			CAM_ERR(CAM_ISP,
				"Copy from user failed with in_port = %pK",
				in_port);
			rc = -EFAULT;
			goto free_res;
		}
	}

	/* Check whether context has only RDI resource */
	if (!total_pix_port) {
		ife_ctx->is_rdi_only_context = 1;
		CAM_DBG(CAM_ISP, "RDI only context");
	}

	/* Process base info */
	rc = cam_ife_mgr_process_base_info(ife_ctx);
	if (rc) {
		CAM_ERR(CAM_ISP, "Process base info failed");
		goto free_res;
	}

	acquire_args->ctxt_to_hw_map = ife_ctx;
	ife_ctx->ctx_in_use = 1;

	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
	CAM_DBG(CAM_ISP, "Exit...(success)");

	return 0;

free_res:
	/*Dump all the current acquired resources */
	cam_ife_hw_mgr_dump_all_ctx(ife_ctx);
	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
	cam_cdm_release(ife_ctx->cdm_handle);
free_ctx:
	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
err:
	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
	return rc;
}
2315
2316/* entry function: acquire_hw */
2317static int cam_ife_mgr_acquire(void *hw_mgr_priv,
2318 void *acquire_hw_args)
2319{
2320 struct cam_hw_acquire_args *acquire_args = acquire_hw_args;
2321 int rc = -1;
2322
2323 CAM_DBG(CAM_ISP, "Enter...");
2324
2325 if (!acquire_args || acquire_args->num_acq <= 0) {
2326 CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
2327 return -EINVAL;
2328 }
2329
2330 if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT)
2331 rc = cam_ife_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args);
2332 else
2333 rc = cam_ife_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args);
2334
2335 CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
2336 return rc;
2337}
2338
2339
/*
 * Apply a bandwidth-config generic blob to every IFE source resource of
 * the context.
 *
 * For CAMIF / bus-RD / CAMIF-lite resources the left/right pixel votes
 * are used (applied at most once per side, tracked by the two
 * camif_*_bw_updated flags); RDI0..RDI3 use the per-RDI vote indexed by
 * res_id. The selected camnoc/external (ib) and external ab values are
 * then sent to the VFE via CAM_ISP_HW_CMD_BW_UPDATE.
 *
 * NOTE(review): the CAMIF/RD branch below is intentionally brace-less;
 * its inner if/else is complete, so the following "else if" chain binds
 * to the OUTER if. Do not "fix" the indentation by adding braces without
 * re-verifying attachment.
 *
 * Returns the rc of the last process_cmd call (-EINVAL if none ran).
 */
static int cam_isp_blob_bw_update(
	struct cam_isp_bw_config *bw_config,
	struct cam_isp_bw_config_ab *bw_config_ab,
	struct cam_ife_hw_mgr_ctx *ctx)
{
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_hw_intf *hw_intf;
	struct cam_vfe_bw_update_args bw_upd_args;
	uint64_t cam_bw_bps = 0;
	uint64_t ext_bw_bps = 0;
	uint64_t ext_bw_bps_ab = 0;
	int rc = -EINVAL;
	uint32_t i;
	bool camif_l_bw_updated = false;
	bool camif_r_bw_updated = false;

	CAM_DBG(CAM_PERF,
		"usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
		"right cam_bw_bps=%llu ext_bw_bps=%llu",
		bw_config->usage_type,
		bw_config->left_pix_vote.cam_bw_bps,
		bw_config->left_pix_vote.ext_bw_bps,
		bw_config->right_pix_vote.cam_bw_bps,
		bw_config->right_pix_vote.ext_bw_bps);

	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			/* CAMIF / bus RD: pick the per-side pixel vote */
			if ((hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
				|| (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_RD))
				if (i == CAM_ISP_HW_SPLIT_LEFT) {
					if (camif_l_bw_updated)
						continue;

					cam_bw_bps =
					bw_config->left_pix_vote.cam_bw_bps;
					ext_bw_bps =
					bw_config->left_pix_vote.ext_bw_bps;
					ext_bw_bps_ab =
					bw_config_ab->left_pix_vote_ab;

					camif_l_bw_updated = true;
				} else {
					if (camif_r_bw_updated)
						continue;

					cam_bw_bps =
					bw_config->right_pix_vote.cam_bw_bps;
					ext_bw_bps =
					bw_config->right_pix_vote.ext_bw_bps;
					ext_bw_bps_ab =
					bw_config_ab->right_pix_vote_ab;

					camif_r_bw_updated = true;
				}
			/* RDI0..RDI3: per-RDI vote, skip if not configured */
			else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
						&& (hw_mgr_res->res_id <=
						CAM_ISP_HW_VFE_IN_RDI3)) {
				uint32_t idx = hw_mgr_res->res_id -
						CAM_ISP_HW_VFE_IN_RDI0;
				if (idx >= bw_config->num_rdi)
					continue;

				cam_bw_bps =
					bw_config->rdi_vote[idx].cam_bw_bps;
				ext_bw_bps =
					bw_config->rdi_vote[idx].ext_bw_bps;
				ext_bw_bps_ab =
					bw_config_ab->rdi_vote_ab[idx];
			} else if (hw_mgr_res->res_id ==
				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
				/* CAMIF lite shares the pixel votes */
				if (i == CAM_ISP_HW_SPLIT_LEFT) {
					if (camif_l_bw_updated)
						continue;

					cam_bw_bps =
					bw_config->left_pix_vote.cam_bw_bps;
					ext_bw_bps =
					bw_config->left_pix_vote.ext_bw_bps;
					ext_bw_bps_ab =
					bw_config_ab->left_pix_vote_ab;

					camif_l_bw_updated = true;
				} else {
					if (camif_r_bw_updated)
						continue;

					cam_bw_bps =
					bw_config->right_pix_vote.cam_bw_bps;
					ext_bw_bps =
					bw_config->right_pix_vote.ext_bw_bps;
					ext_bw_bps_ab =
					bw_config_ab->right_pix_vote_ab;


					camif_r_bw_updated = true;
				}
			} else
				if (hw_mgr_res->hw_res[i]) {
					CAM_ERR(CAM_ISP, "Invalid res_id %u",
						hw_mgr_res->res_id);
					rc = -EINVAL;
					return rc;
				}

			/* Push the selected votes down to the VFE core */
			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			if (hw_intf && hw_intf->hw_ops.process_cmd) {
				bw_upd_args.node_res =
					hw_mgr_res->hw_res[i];

				bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
				bw_upd_args.external_bw_bytes = ext_bw_bps;
				bw_upd_args.external_bw_bytes_ab =
					ext_bw_bps_ab;

				rc = hw_intf->hw_ops.process_cmd(
					hw_intf->hw_priv,
					CAM_ISP_HW_CMD_BW_UPDATE,
					&bw_upd_args,
					sizeof(struct cam_vfe_bw_update_args));
				if (rc)
					CAM_ERR(CAM_ISP, "BW Update failed");
			} else
				CAM_WARN(CAM_ISP, "NULL hw_intf!");
		}
	}

	return rc;
}
2471
2472/* entry function: config_hw */
2473static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
2474 void *config_hw_args)
2475{
2476 int rc = -1, i;
2477 struct cam_hw_config_args *cfg;
2478 struct cam_hw_update_entry *cmd;
2479 struct cam_cdm_bl_request *cdm_cmd;
2480 struct cam_ife_hw_mgr_ctx *ctx;
2481 struct cam_isp_prepare_hw_update_data *hw_update_data;
2482
2483 CAM_DBG(CAM_ISP, "Enter");
2484 if (!hw_mgr_priv || !config_hw_args) {
2485 CAM_ERR(CAM_ISP, "Invalid arguments");
2486 return -EINVAL;
2487 }
2488
2489 cfg = config_hw_args;
2490 ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
2491 if (!ctx) {
2492 CAM_ERR(CAM_ISP, "Invalid context is used");
2493 return -EPERM;
2494 }
2495
2496 if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
2497 CAM_ERR(CAM_ISP, "Invalid context parameters");
2498 return -EPERM;
2499 }
2500 if (atomic_read(&ctx->overflow_pending))
2501 return -EINVAL;
2502
2503 hw_update_data = (struct cam_isp_prepare_hw_update_data *) cfg->priv;
2504
2505 for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
Raja Mallik8b88b232019-04-04 14:32:27 +05302506 CAM_DBG(CAM_ISP, "hw_update_data->bw_config_valid[%d]:%d", i,
2507 hw_update_data->bw_config_valid[i]);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302508 if (hw_update_data->bw_config_valid[i] == true) {
2509 rc = cam_isp_blob_bw_update(
2510 (struct cam_isp_bw_config *)
Raja Mallik8b88b232019-04-04 14:32:27 +05302511 &hw_update_data->bw_config[i],
2512 (struct cam_isp_bw_config_ab *)
2513 &hw_update_data->bw_config_ab[i],
2514 ctx);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302515 if (rc)
2516 CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
2517 }
2518 }
2519
Raja Mallikfe46d932019-02-12 20:34:07 +05302520 CAM_DBG(CAM_ISP,
2521 "Enter ctx id:%d num_hw_upd_entries %d request id: %llu",
2522 ctx->ctx_index, cfg->num_hw_update_entries, cfg->request_id);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302523
2524 if (cfg->num_hw_update_entries > 0) {
2525 cdm_cmd = ctx->cdm_cmd;
2526 cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries;
2527 cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
2528 cdm_cmd->flag = true;
2529 cdm_cmd->userdata = ctx;
2530 cdm_cmd->cookie = cfg->request_id;
2531
2532 for (i = 0 ; i <= cfg->num_hw_update_entries; i++) {
2533 cmd = (cfg->hw_update_entries + i);
2534 cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
2535 cdm_cmd->cmd[i].offset = cmd->offset;
2536 cdm_cmd->cmd[i].len = cmd->len;
2537 }
2538
2539 if (cfg->init_packet)
2540 init_completion(&ctx->config_done_complete);
2541
2542 CAM_DBG(CAM_ISP, "Submit to CDM");
2543 rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
2544 if (rc) {
2545 CAM_ERR(CAM_ISP, "Failed to apply the configs");
2546 return rc;
2547 }
2548
2549 if (cfg->init_packet) {
2550 rc = wait_for_completion_timeout(
2551 &ctx->config_done_complete,
2552 msecs_to_jiffies(30));
2553 if (rc <= 0) {
2554 CAM_ERR(CAM_ISP,
Raja Mallikfe46d932019-02-12 20:34:07 +05302555 "config done completion timeout for req_id=%llu rc=%d ctx_index %d",
2556 cfg->request_id, rc, ctx->ctx_index);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302557 if (rc == 0)
2558 rc = -ETIMEDOUT;
2559 } else {
2560 rc = 0;
2561 CAM_DBG(CAM_ISP,
Raja Mallikfe46d932019-02-12 20:34:07 +05302562 "config done Success for req_id=%llu ctx_index %d",
2563 cfg->request_id, ctx->ctx_index);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302564 }
2565 }
2566 } else {
2567 CAM_ERR(CAM_ISP, "No commands to config");
2568 }
Raja Mallikfe46d932019-02-12 20:34:07 +05302569 CAM_DBG(CAM_ISP, "Exit: Config Done: %llu", cfg->request_id);
Raja Mallikc7e256f2018-12-06 17:36:28 +05302570
2571 return rc;
2572}
2573
2574static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
2575{
2576 int rc = 0;
2577 struct cam_hw_stop_args *stop_args = stop_hw_args;
2578 struct cam_ife_hw_mgr_res *hw_mgr_res;
2579 struct cam_ife_hw_mgr_ctx *ctx;
2580 uint32_t i, master_base_idx = 0;
2581
2582 if (!stop_hw_args) {
2583 CAM_ERR(CAM_ISP, "Invalid arguments");
2584 return -EINVAL;
2585 }
2586 ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
2587 if (!ctx || !ctx->ctx_in_use) {
2588 CAM_ERR(CAM_ISP, "Invalid context is used");
2589 return -EPERM;
2590 }
2591
2592 CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
2593 ctx->ctx_index);
2594
2595 if (!ctx->num_base) {
2596 CAM_ERR(CAM_ISP, "Number of bases are zero");
2597 return -EINVAL;
2598 }
2599
2600 /* get master base index first */
2601 for (i = 0; i < ctx->num_base; i++) {
2602 if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
2603 master_base_idx = ctx->base[i].idx;
2604 break;
2605 }
2606 }
2607
2608 if (i == ctx->num_base)
2609 master_base_idx = ctx->base[0].idx;
2610
2611
2612 /* stop the master CIDs first */
2613 cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
2614 master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
2615
2616 /* stop rest of the CIDs */
2617 for (i = 0; i < ctx->num_base; i++) {
2618 if (i == master_base_idx)
2619 continue;
2620 cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
2621 ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
2622 }
2623
2624 /* stop the master CSID path first */
2625 cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
2626 master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
2627
2628 /* Stop rest of the CSID paths */
2629 for (i = 0; i < ctx->num_base; i++) {
2630 if (i == master_base_idx)
2631 continue;
2632
2633 cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
2634 ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
2635 }
2636
2637 /* IFE mux in resources */
2638 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
2639 cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
2640 }
2641
Raja Mallikfe46d932019-02-12 20:34:07 +05302642 /* IFE bus rd resources */
2643 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
2644 cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
2645 }
2646
Raja Mallikc7e256f2018-12-06 17:36:28 +05302647 /* IFE out resources */
2648 for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
2649 cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
2650
2651
2652 /* Stop tasklet for context */
2653 cam_tasklet_stop(ctx->common.tasklet_info);
2654 CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
2655 ctx->ctx_index, rc);
2656
2657 return rc;
2658}
2659
2660static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
2661 enum cam_vfe_bw_control_action action)
2662{
2663 struct cam_ife_hw_mgr_res *hw_mgr_res;
2664 struct cam_hw_intf *hw_intf;
2665 struct cam_vfe_bw_control_args bw_ctrl_args;
2666 int rc = -EINVAL;
2667 uint32_t i;
2668
2669 CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
2670
2671 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
2672 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
2673 if (!hw_mgr_res->hw_res[i])
2674 continue;
2675
2676 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
2677 if (hw_intf && hw_intf->hw_ops.process_cmd) {
2678 bw_ctrl_args.node_res =
2679 hw_mgr_res->hw_res[i];
2680 bw_ctrl_args.action = action;
2681
2682 rc = hw_intf->hw_ops.process_cmd(
2683 hw_intf->hw_priv,
2684 CAM_ISP_HW_CMD_BW_CONTROL,
2685 &bw_ctrl_args,
2686 sizeof(struct cam_vfe_bw_control_args));
2687 if (rc)
2688 CAM_ERR(CAM_ISP, "BW Update failed");
2689 } else
2690 CAM_WARN(CAM_ISP, "NULL hw_intf!");
2691 }
2692 }
2693
2694 return rc;
2695}
2696
/* Exclude this context's IFE sources from bandwidth voting (on stop). */
static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
{
	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
}
2701
/* Re-include this context's IFE sources in bandwidth voting (on start). */
static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
{
	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_INCLUDE);
}
2706
/*
 * Entry function: stop_hw.
 *
 * Stops the context's pipeline in strict upstream-to-downstream order:
 * master CSID path, remaining CSID paths, master CID, remaining CIDs,
 * IFE out ports, bus-RD, IFE mux sources, then the tasklet, and finally
 * excludes the context from bandwidth voting. Unless stop_only is set,
 * it also streams off the CDM, de-inits HW, and — when this was the last
 * active context — disables the SAFE SMMU LUT via SCM.
 *
 * Returns 0 on success, negative errno on invalid args/context.
 */
static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
{
	int rc = 0;
	struct cam_hw_stop_args *stop_args = stop_hw_args;
	struct cam_isp_stop_args *stop_isp;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_ife_hw_mgr_ctx *ctx;
	enum cam_ife_csid_halt_cmd csid_halt_type;
	uint32_t i, master_base_idx = 0;

	if (!hw_mgr_priv || !stop_hw_args) {
		CAM_ERR(CAM_ISP, "Invalid arguments");
		return -EINVAL;
	}

	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
	if (!ctx || !ctx->ctx_in_use) {
		CAM_ERR(CAM_ISP, "Invalid context is used");
		return -EPERM;
	}

	CAM_DBG(CAM_ISP, " Enter...ctx id:%d", ctx->ctx_index);
	stop_isp = (struct cam_isp_stop_args *)stop_args->args;

	/* stop_only is incompatible with an immediate halt request */
	if ((stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_IMMEDIATELY) &&
		(stop_isp->stop_only)) {
		CAM_ERR(CAM_ISP, "Invalid params hw_stop_cmd:%d stop_only:%d",
			stop_isp->hw_stop_cmd, stop_isp->stop_only);
		return -EPERM;
	}

	/* Set the csid halt command */
	if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
		csid_halt_type = CAM_CSID_HALT_AT_FRAME_BOUNDARY;
	else
		csid_halt_type = CAM_CSID_HALT_IMMEDIATELY;

	/* Note:stop resource will remove the irq mask from the hardware */

	if (!ctx->num_base) {
		CAM_ERR(CAM_ISP, "number of bases are zero");
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Halting CSIDs");

	/* get master base index first */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
			master_base_idx = ctx->base[i].idx;
			break;
		}
	}

	/*
	 * If Context does not have PIX resources and has only RDI resource
	 * then take the first base index.
	 */
	if (i == ctx->num_base)
		master_base_idx = ctx->base[0].idx;
	CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);

	/* Stop the master CSID path first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
		master_base_idx, csid_halt_type);

	/* stop rest of the CSID paths */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].idx == master_base_idx)
			continue;
		CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d",
			ctx->base[i].idx, i, master_base_idx);

		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
			ctx->base[i].idx, csid_halt_type);
	}

	CAM_DBG(CAM_ISP, "Stopping master CID idx %d", master_base_idx);

	/* Stop the master CIDs first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
		master_base_idx, csid_halt_type);

	/* stop rest of the CIDs */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].idx == master_base_idx)
			continue;
		CAM_DBG(CAM_ISP, "Stopping CID idx %d i %d master %d",
			ctx->base[i].idx, i, master_base_idx);
		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
			ctx->base[i].idx, csid_halt_type);
	}

	CAM_DBG(CAM_ISP, "Going to stop IFE Out");

	/* IFE out resources */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);

	/* IFE bus rd resources */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
	}

	CAM_DBG(CAM_ISP, "Going to stop IFE Mux");

	/* IFE mux in resources */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
	}

	cam_tasklet_stop(ctx->common.tasklet_info);

	/* Drop this context's bandwidth votes */
	cam_ife_mgr_pause_hw(ctx);

	if (stop_isp->stop_only)
		goto end;

	if (cam_cdm_stream_off(ctx->cdm_handle))
		CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle);

	cam_ife_hw_mgr_deinit_hw(ctx);
	CAM_DBG(CAM_ISP,
		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);

	/* Last active context out: disable SAFE LUT; SCM failure is logged
	 * but deliberately not propagated (rc reset to 0).
	 */
	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
	if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) {
		rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"SAFE SCM call failed:Check TZ/HYP dependency");
			rc = 0;
		}
	}
	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);

end:
	return rc;
}
2847
2848static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
2849 uint32_t hw_idx)
2850{
2851 uint32_t i = 0;
2852 struct cam_hw_intf *vfe_hw_intf;
2853 uint32_t vfe_reset_type;
2854
2855 if (!hw_mgr) {
2856 CAM_DBG(CAM_ISP, "Invalid arguments");
2857 return -EINVAL;
2858 }
2859 /* Reset VFE HW*/
2860 vfe_reset_type = CAM_VFE_HW_RESET_HW;
2861
2862 for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
2863 if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
2864 continue;
2865 CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
2866 vfe_hw_intf = hw_mgr->ife_devices[i];
2867 vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv,
2868 &vfe_reset_type, sizeof(vfe_reset_type));
2869 break;
2870 }
2871
2872 CAM_DBG(CAM_ISP, "Exit Successfully");
2873 return 0;
2874}
2875
/*
 * Restart a context's pipeline after overflow recovery.
 *
 * Start order is downstream-to-upstream (reverse of stop): tasklet,
 * IFE out ports, bus-RD, IFE mux sources, CSID paths. On any failure
 * everything started so far is halted immediately via
 * cam_ife_mgr_stop_hw_in_overflow.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int cam_ife_mgr_restart_hw(void *start_hw_args)
{
	int rc = -1;
	struct cam_hw_start_args *start_args = start_hw_args;
	struct cam_ife_hw_mgr_ctx *ctx;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	uint32_t i;

	if (!start_hw_args) {
		CAM_ERR(CAM_ISP, "Invalid arguments");
		return -EINVAL;
	}

	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
	if (!ctx || !ctx->ctx_in_use) {
		CAM_ERR(CAM_ISP, "Invalid context is used");
		return -EPERM;
	}

	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);

	cam_tasklet_start(ctx->common.tasklet_info);

	/* start the IFE out devices */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
		rc = cam_ife_hw_mgr_start_hw_res(
			&ctx->res_list_ife_out[i], ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
			goto err;
		}
	}

	CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);

	/* Start IFE BUS RD device */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE BUS RD (%d)",
				hw_mgr_res->res_id);
			goto err;
		}
	}

	/* Start the IFE mux in devices */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
				hw_mgr_res->res_id);
			goto err;
		}
	}

	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
	/* Start the IFE CSID HW devices */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
				hw_mgr_res->res_id);
			goto err;
		}
	}

	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
	/* Start IFE root node: do nothing */
	CAM_DBG(CAM_ISP, "Exit...(success)");
	return 0;

err:
	/* Roll back: halt whatever was started before the failure */
	cam_ife_mgr_stop_hw_in_overflow(start_hw_args);
	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
	return rc;
}
2952
2953static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
2954{
2955 int rc = -1;
2956 struct cam_isp_start_args *start_isp = start_hw_args;
2957 struct cam_hw_stop_args stop_args;
2958 struct cam_isp_stop_args stop_isp;
2959 struct cam_ife_hw_mgr_ctx *ctx;
2960 struct cam_ife_hw_mgr_res *hw_mgr_res;
2961 struct cam_isp_resource_node *rsrc_node = NULL;
2962 uint32_t i, camif_debug;
2963
2964 if (!hw_mgr_priv || !start_isp) {
2965 CAM_ERR(CAM_ISP, "Invalid arguments");
2966 return -EINVAL;
2967 }
2968
2969 ctx = (struct cam_ife_hw_mgr_ctx *)
2970 start_isp->hw_config.ctxt_to_hw_map;
2971 if (!ctx || !ctx->ctx_in_use) {
2972 CAM_ERR(CAM_ISP, "Invalid context is used");
2973 return -EPERM;
2974 }
2975
2976 if ((!ctx->init_done) && start_isp->start_only) {
2977 CAM_ERR(CAM_ISP, "Invalid args init_done %d start_only %d",
2978 ctx->init_done, start_isp->start_only);
2979 return -EINVAL;
2980 }
2981
2982 CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
2983 ctx->ctx_index);
2984
2985 /* update Bandwidth should be done at the hw layer */
2986
2987 cam_tasklet_start(ctx->common.tasklet_info);
2988
2989 if (ctx->init_done && start_isp->start_only)
2990 goto start_only;
2991
2992 /* set current csid debug information to CSID HW */
2993 for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
2994 if (g_ife_hw_mgr.csid_devices[i])
2995 rc = g_ife_hw_mgr.csid_devices[i]->hw_ops.process_cmd(
2996 g_ife_hw_mgr.csid_devices[i]->hw_priv,
2997 CAM_IFE_CSID_SET_CSID_DEBUG,
2998 &g_ife_hw_mgr.debug_cfg.csid_debug,
2999 sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
3000 }
3001
3002 camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug;
3003 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
3004 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3005 if (!hw_mgr_res->hw_res[i])
3006 continue;
3007
3008 rsrc_node = hw_mgr_res->hw_res[i];
3009 if (rsrc_node->process_cmd && (rsrc_node->res_id ==
3010 CAM_ISP_HW_VFE_IN_CAMIF)) {
3011 rc = hw_mgr_res->hw_res[i]->process_cmd(
3012 hw_mgr_res->hw_res[i],
3013 CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
3014 &camif_debug,
3015 sizeof(camif_debug));
3016 }
3017 }
3018 }
3019
3020 rc = cam_ife_hw_mgr_init_hw(ctx);
3021 if (rc) {
3022 CAM_ERR(CAM_ISP, "Init failed");
3023 goto tasklet_stop;
3024 }
3025
3026 ctx->init_done = true;
3027
3028 mutex_lock(&g_ife_hw_mgr.ctx_mutex);
3029 if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) {
3030 rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE);
3031 if (rc) {
3032 CAM_ERR(CAM_ISP,
3033 "SAFE SCM call failed:Check TZ/HYP dependency");
3034 rc = -EFAULT;
3035 goto deinit_hw;
3036 }
3037 }
3038 mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
3039
3040 CAM_DBG(CAM_ISP, "start cdm interface");
3041 rc = cam_cdm_stream_on(ctx->cdm_handle);
3042 if (rc) {
3043 CAM_ERR(CAM_ISP, "Can not start cdm (%d)",
3044 ctx->cdm_handle);
3045 goto safe_disable;
3046 }
3047
3048 /* Apply initial configuration */
3049 CAM_DBG(CAM_ISP, "Config HW");
3050 rc = cam_ife_mgr_config_hw(hw_mgr_priv, &start_isp->hw_config);
3051 if (rc) {
3052 CAM_ERR(CAM_ISP, "Config HW failed");
3053 goto cdm_streamoff;
3054 }
3055
3056start_only:
3057
3058 CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
3059 ctx->ctx_index);
Raja Mallike3ed1a32019-08-22 17:12:32 +05303060 if (start_isp->start_only)
3061 cam_ife_mgr_resume_hw(ctx);
3062
Raja Mallikc7e256f2018-12-06 17:36:28 +05303063 /* start the IFE out devices */
3064 for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
3065 rc = cam_ife_hw_mgr_start_hw_res(
3066 &ctx->res_list_ife_out[i], ctx);
3067 if (rc) {
3068 CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
3069 i);
3070 goto err;
3071 }
3072 }
3073
Raja Mallikfe46d932019-02-12 20:34:07 +05303074 CAM_DBG(CAM_ISP, "START IFE BUS RD ... in ctx id:%d",
3075 ctx->ctx_index);
3076 /* Start the IFE mux in devices */
3077 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
3078 rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
3079 if (rc) {
3080 CAM_ERR(CAM_ISP, "Can not start IFE BUS RD (%d)",
3081 hw_mgr_res->res_id);
3082 goto err;
3083 }
3084 }
3085
Raja Mallikc7e256f2018-12-06 17:36:28 +05303086 CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d",
3087 ctx->ctx_index);
3088 /* Start the IFE mux in devices */
3089 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
3090 rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
3091 if (rc) {
3092 CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
3093 hw_mgr_res->res_id);
3094 goto err;
3095 }
3096 }
3097
3098 CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
3099 ctx->ctx_index);
3100 /* Start the IFE CSID HW devices */
3101 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
3102 rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
3103 if (rc) {
3104 CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
3105 hw_mgr_res->res_id);
3106 goto err;
3107 }
3108 }
3109
3110 CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d",
3111 ctx->ctx_index);
3112 /* Start the IFE CID HW devices */
3113 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
3114 rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
3115 if (rc) {
3116 CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
3117 hw_mgr_res->res_id);
3118 goto err;
3119 }
3120 }
3121
Raja Mallik477ca242019-06-21 14:23:32 +05303122 ctx->dual_ife_irq_mismatch_cnt = 0;
Raja Mallikc7e256f2018-12-06 17:36:28 +05303123 /* Start IFE root node: do nothing */
3124 CAM_DBG(CAM_ISP, "Start success for ctx id:%d", ctx->ctx_index);
3125
3126 return 0;
3127
3128err:
3129 stop_isp.stop_only = false;
3130 stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
3131 stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map;
3132 stop_args.args = (void *)(&stop_isp);
3133
3134 cam_ife_mgr_stop_hw(hw_mgr_priv, &stop_args);
3135 CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
3136 return rc;
3137
3138cdm_streamoff:
3139 cam_cdm_stream_off(ctx->cdm_handle);
3140
3141safe_disable:
3142 cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
3143
3144deinit_hw:
3145 cam_ife_hw_mgr_deinit_hw(ctx);
Raja Mallikc7e256f2018-12-06 17:36:28 +05303146
3147tasklet_stop:
3148 cam_tasklet_stop(ctx->common.tasklet_info);
3149
3150 return rc;
3151}
3152
/*
 * cam_ife_mgr_read - hw-mgr "read" op stub.
 *
 * Direct reads are not supported by the IFE hw manager; this op
 * unconditionally rejects the request.
 *
 * Return: -EPERM always.
 */
static int cam_ife_mgr_read(void *hw_mgr_priv, void *read_args)
{
	return -EPERM;
}
3157
/*
 * cam_ife_mgr_write - hw-mgr "write" op stub.
 *
 * Direct writes are not supported by the IFE hw manager; this op
 * unconditionally rejects the request.
 *
 * Return: -EPERM always.
 */
static int cam_ife_mgr_write(void *hw_mgr_priv, void *write_args)
{
	return -EPERM;
}
3162
Raja Mallike3ed1a32019-08-22 17:12:32 +05303163static int cam_ife_mgr_reset(void *hw_mgr_priv, void *hw_reset_args)
3164{
3165 struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
3166 struct cam_hw_reset_args *reset_args = hw_reset_args;
3167 struct cam_ife_hw_mgr_ctx *ctx;
3168 struct cam_ife_hw_mgr_res *hw_mgr_res;
3169 uint32_t i;
3170 int rc = 0;
3171
3172 if (!hw_mgr_priv || !hw_reset_args) {
3173 CAM_ERR(CAM_ISP, "Invalid arguments");
3174 return -EINVAL;
3175 }
3176
3177 ctx = (struct cam_ife_hw_mgr_ctx *)reset_args->ctxt_to_hw_map;
3178 if (!ctx || !ctx->ctx_in_use) {
3179 CAM_ERR(CAM_ISP, "Invalid context is used");
3180 return -EPERM;
3181 }
3182
3183 CAM_DBG(CAM_ISP, "reset csid and vfe hw");
3184 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
3185 list) {
3186 rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
3187 if (rc) {
3188 CAM_ERR(CAM_ISP, "Failed RESET (%d) rc:%d",
3189 hw_mgr_res->res_id, rc);
3190 goto end;
3191 }
3192 }
3193
3194 for (i = 0; i < ctx->num_base; i++)
3195 rc = cam_ife_mgr_reset_vfe_hw(hw_mgr, ctx->base[i].idx);
3196
3197end:
3198 return rc;
3199}
3200
Raja Mallikc7e256f2018-12-06 17:36:28 +05303201static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
3202 void *release_hw_args)
3203{
3204 int rc = 0;
3205 struct cam_hw_release_args *release_args = release_hw_args;
3206 struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
3207 struct cam_ife_hw_mgr_ctx *ctx;
3208 uint32_t i;
3209
3210 if (!hw_mgr_priv || !release_hw_args) {
3211 CAM_ERR(CAM_ISP, "Invalid arguments");
3212 return -EINVAL;
3213 }
3214
3215 ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
3216 if (!ctx || !ctx->ctx_in_use) {
3217 CAM_ERR(CAM_ISP, "Invalid context is used");
3218 return -EPERM;
3219 }
3220
3221 CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
3222 ctx->ctx_index);
3223
3224 if (ctx->init_done)
3225 cam_ife_hw_mgr_deinit_hw(ctx);
3226
3227 /* we should called the stop hw before this already */
3228 cam_ife_hw_mgr_release_hw_for_ctx(ctx);
3229
3230 /* reset base info */
3231 ctx->num_base = 0;
3232 memset(ctx->base, 0, sizeof(ctx->base));
3233
3234 /* release cdm handle */
3235 cam_cdm_release(ctx->cdm_handle);
3236
3237 /* clean context */
3238 list_del_init(&ctx->list);
3239 ctx->ctx_in_use = 0;
3240 ctx->is_rdi_only_context = 0;
3241 ctx->cdm_handle = 0;
3242 ctx->cdm_ops = NULL;
Raja Mallik477ca242019-06-21 14:23:32 +05303243 ctx->dual_ife_irq_mismatch_cnt = 0;
Raja Mallikc7e256f2018-12-06 17:36:28 +05303244 atomic_set(&ctx->overflow_pending, 0);
3245 for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
3246 ctx->sof_cnt[i] = 0;
3247 ctx->eof_cnt[i] = 0;
3248 ctx->epoch_cnt[i] = 0;
3249 }
3250 CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
3251 ctx->ctx_index);
3252 cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
3253 return rc;
3254}
3255
Raja Mallikfe46d932019-02-12 20:34:07 +05303256static int cam_isp_blob_fe_update(
3257 uint32_t blob_type,
3258 struct cam_isp_generic_blob_info *blob_info,
3259 struct cam_fe_config *fe_config,
3260 struct cam_hw_prepare_update_args *prepare)
3261{
3262 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3263 struct cam_ife_hw_mgr_res *hw_mgr_res;
3264 struct cam_hw_intf *hw_intf;
3265 int rc = -EINVAL;
3266 uint32_t i;
3267 struct cam_vfe_fe_update_args fe_upd_args;
3268
3269 ctx = prepare->ctxt_to_hw_map;
3270
3271 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
3272 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3273 if (!hw_mgr_res->hw_res[i])
3274 continue;
3275
3276 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
3277 if (hw_intf && hw_intf->hw_ops.process_cmd) {
3278 fe_upd_args.node_res =
3279 hw_mgr_res->hw_res[i];
3280
3281 memcpy(&fe_upd_args.fe_config, fe_config,
3282 sizeof(struct cam_fe_config));
3283
3284 rc = hw_intf->hw_ops.process_cmd(
3285 hw_intf->hw_priv,
3286 CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD,
3287 &fe_upd_args,
3288 sizeof(
3289 struct cam_fe_config));
3290 if (rc)
3291 CAM_ERR(CAM_ISP, "fs Update failed");
3292 } else
3293 CAM_WARN(CAM_ISP, "NULL hw_intf!");
3294 }
3295 }
3296
3297 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
3298 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3299 if (!hw_mgr_res->hw_res[i])
3300 continue;
3301
3302 if (hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_RD)
3303 continue;
3304
3305 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
3306 if (hw_intf && hw_intf->hw_ops.process_cmd) {
3307 fe_upd_args.node_res =
3308 hw_mgr_res->hw_res[i];
3309
3310 memcpy(&fe_upd_args.fe_config, fe_config,
3311 sizeof(struct cam_fe_config));
3312
3313 rc = hw_intf->hw_ops.process_cmd(
3314 hw_intf->hw_priv,
3315 CAM_ISP_HW_CMD_FE_UPDATE_IN_RD,
3316 &fe_upd_args,
3317 sizeof(
3318 struct cam_vfe_fe_update_args));
3319 if (rc)
3320 CAM_ERR(CAM_ISP, "fe Update failed");
3321 } else
3322 CAM_WARN(CAM_ISP, "NULL hw_intf!");
3323 }
3324 }
3325 return rc;
3326}
3327
Raja Mallike3ed1a32019-08-22 17:12:32 +05303328static int cam_isp_blob_fps_config(
3329 uint32_t blob_type,
3330 struct cam_isp_generic_blob_info *blob_info,
3331 struct cam_fps_config *fps_config,
3332 struct cam_hw_prepare_update_args *prepare)
3333{
3334 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3335 struct cam_ife_hw_mgr_res *hw_mgr_res;
3336 struct cam_hw_intf *hw_intf;
3337 struct cam_vfe_fps_config_args fps_config_args;
3338 int rc = -EINVAL;
3339 uint32_t i;
3340
3341 ctx = prepare->ctxt_to_hw_map;
3342
3343 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
3344 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3345 if (!hw_mgr_res->hw_res[i])
3346 continue;
3347
3348 if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
3349 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
3350 if (hw_intf && hw_intf->hw_ops.process_cmd) {
3351 fps_config_args.fps =
3352 fps_config->fps;
3353 fps_config_args.node_res =
3354 hw_mgr_res->hw_res[i];
3355
3356 rc = hw_intf->hw_ops.process_cmd(
3357 hw_intf->hw_priv,
3358 CAM_ISP_HW_CMD_FPS_CONFIG,
3359 &fps_config_args,
3360 sizeof(
3361 struct cam_vfe_fps_config_args)
3362 );
3363 if (rc)
3364 CAM_ERR(CAM_ISP,
3365 "Failed fps config:%d",
3366 fps_config->fps);
3367 } else
3368 CAM_WARN(CAM_ISP, "NULL hw_intf!");
3369 }
3370 }
3371 }
3372
3373 return rc;
3374}
3375
Raja Mallikc7e256f2018-12-06 17:36:28 +05303376static int cam_isp_blob_ubwc_update(
3377 uint32_t blob_type,
3378 struct cam_isp_generic_blob_info *blob_info,
3379 struct cam_ubwc_config *ubwc_config,
3380 struct cam_hw_prepare_update_args *prepare)
3381{
3382 struct cam_ubwc_plane_cfg_v1 *ubwc_plane_cfg;
3383 struct cam_kmd_buf_info *kmd_buf_info;
3384 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3385 struct cam_ife_hw_mgr_res *hw_mgr_res;
3386 uint32_t res_id_out, i;
3387 uint32_t total_used_bytes = 0;
3388 uint32_t kmd_buf_remain_size;
3389 uint32_t *cmd_buf_addr;
3390 uint32_t bytes_used = 0;
3391 int num_ent, rc = 0;
3392
3393 ctx = prepare->ctxt_to_hw_map;
3394 if (!ctx) {
3395 CAM_ERR(CAM_ISP, "Invalid ctx");
3396 rc = -EINVAL;
3397 goto end;
3398 }
3399
3400 if ((prepare->num_hw_update_entries + 1) >=
3401 prepare->max_hw_update_entries) {
3402 CAM_ERR(CAM_ISP, "Insufficient HW entries :%d max:%d",
3403 prepare->num_hw_update_entries,
3404 prepare->max_hw_update_entries);
3405 rc = -EINVAL;
3406 goto end;
3407 }
3408
3409 switch (ubwc_config->api_version) {
3410 case CAM_UBWC_CFG_VERSION_1:
3411 CAM_DBG(CAM_ISP, "num_ports= %d", ubwc_config->num_ports);
3412
3413 kmd_buf_info = blob_info->kmd_buf_info;
3414 for (i = 0; i < ubwc_config->num_ports; i++) {
3415 ubwc_plane_cfg = &ubwc_config->ubwc_plane_cfg[i][0];
3416 res_id_out = ubwc_plane_cfg->port_type & 0xFF;
3417
3418 CAM_DBG(CAM_ISP, "UBWC config idx %d, port_type=%d", i,
3419 ubwc_plane_cfg->port_type);
3420
3421 if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
3422 CAM_ERR(CAM_ISP, "Invalid port type:%x",
3423 ubwc_plane_cfg->port_type);
3424 rc = -EINVAL;
3425 goto end;
3426 }
3427
3428 if ((kmd_buf_info->used_bytes
3429 + total_used_bytes) < kmd_buf_info->size) {
3430 kmd_buf_remain_size = kmd_buf_info->size -
3431 (kmd_buf_info->used_bytes
3432 + total_used_bytes);
3433 } else {
3434 CAM_ERR(CAM_ISP,
3435 "no free kmd memory for base=%d bytes_used=%u buf_size=%u",
3436 blob_info->base_info->idx, bytes_used,
3437 kmd_buf_info->size);
3438 rc = -ENOMEM;
3439 goto end;
3440 }
3441
3442 cmd_buf_addr = kmd_buf_info->cpu_addr +
3443 kmd_buf_info->used_bytes/4 +
3444 total_used_bytes/4;
3445 hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
3446
3447 if (!hw_mgr_res) {
3448 CAM_ERR(CAM_ISP, "Invalid hw_mgr_res");
3449 rc = -EINVAL;
3450 goto end;
3451 }
3452
3453 rc = cam_isp_add_cmd_buf_update(
3454 hw_mgr_res, blob_type,
3455 blob_type_hw_cmd_map[blob_type],
3456 blob_info->base_info->idx,
3457 (void *)cmd_buf_addr,
3458 kmd_buf_remain_size,
3459 (void *)ubwc_plane_cfg,
3460 &bytes_used);
3461 if (rc < 0) {
3462 CAM_ERR(CAM_ISP,
3463 "Failed cmd_update, base_idx=%d, bytes_used=%u, res_id_out=0x%x",
3464 blob_info->base_info->idx,
3465 bytes_used,
3466 res_id_out);
3467 goto end;
3468 }
3469
3470 total_used_bytes += bytes_used;
3471 }
3472
3473 if (total_used_bytes) {
3474 /* Update the HW entries */
3475 num_ent = prepare->num_hw_update_entries;
3476 prepare->hw_update_entries[num_ent].handle =
3477 kmd_buf_info->handle;
3478 prepare->hw_update_entries[num_ent].len =
3479 total_used_bytes;
3480 prepare->hw_update_entries[num_ent].offset =
3481 kmd_buf_info->offset;
3482 num_ent++;
3483
3484 kmd_buf_info->used_bytes += total_used_bytes;
3485 kmd_buf_info->offset += total_used_bytes;
3486 prepare->num_hw_update_entries = num_ent;
3487 }
3488 break;
3489 default:
3490 CAM_ERR(CAM_ISP, "Invalid UBWC API Version %d",
3491 ubwc_config->api_version);
3492 rc = -EINVAL;
3493 break;
3494 }
3495end:
3496 return rc;
3497}
3498
3499static int cam_isp_blob_hfr_update(
3500 uint32_t blob_type,
3501 struct cam_isp_generic_blob_info *blob_info,
3502 struct cam_isp_resource_hfr_config *hfr_config,
3503 struct cam_hw_prepare_update_args *prepare)
3504{
3505 struct cam_isp_port_hfr_config *port_hfr_config;
3506 struct cam_kmd_buf_info *kmd_buf_info;
3507 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3508 struct cam_ife_hw_mgr_res *hw_mgr_res;
3509 uint32_t res_id_out, i;
3510 uint32_t total_used_bytes = 0;
3511 uint32_t kmd_buf_remain_size;
3512 uint32_t *cmd_buf_addr;
3513 uint32_t bytes_used = 0;
3514 int num_ent, rc = 0;
3515
3516 ctx = prepare->ctxt_to_hw_map;
3517 CAM_DBG(CAM_ISP, "num_ports= %d",
3518 hfr_config->num_ports);
3519
3520 /* Max one hw entries required for hfr config update */
3521 if (prepare->num_hw_update_entries + 1 >=
3522 prepare->max_hw_update_entries) {
3523 CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
3524 prepare->num_hw_update_entries,
3525 prepare->max_hw_update_entries);
3526 return -EINVAL;
3527 }
3528
3529 kmd_buf_info = blob_info->kmd_buf_info;
3530 for (i = 0; i < hfr_config->num_ports; i++) {
3531 port_hfr_config = &hfr_config->port_hfr_config[i];
3532 res_id_out = port_hfr_config->resource_type & 0xFF;
3533
3534 CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
3535 res_id_out);
3536
3537 if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
3538 CAM_ERR(CAM_ISP, "invalid out restype:%x",
3539 port_hfr_config->resource_type);
3540 return -EINVAL;
3541 }
3542
3543 if ((kmd_buf_info->used_bytes
3544 + total_used_bytes) < kmd_buf_info->size) {
3545 kmd_buf_remain_size = kmd_buf_info->size -
3546 (kmd_buf_info->used_bytes +
3547 total_used_bytes);
3548 } else {
3549 CAM_ERR(CAM_ISP,
3550 "no free kmd memory for base %d",
3551 blob_info->base_info->idx);
3552 rc = -ENOMEM;
3553 return rc;
3554 }
3555
3556 cmd_buf_addr = kmd_buf_info->cpu_addr +
3557 kmd_buf_info->used_bytes/4 +
3558 total_used_bytes/4;
3559 hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
3560
3561 rc = cam_isp_add_cmd_buf_update(
3562 hw_mgr_res, blob_type,
3563 blob_type_hw_cmd_map[blob_type],
3564 blob_info->base_info->idx,
3565 (void *)cmd_buf_addr,
3566 kmd_buf_remain_size,
3567 (void *)port_hfr_config,
3568 &bytes_used);
3569 if (rc < 0) {
3570 CAM_ERR(CAM_ISP,
3571 "Failed cmd_update, base_idx=%d, rc=%d",
3572 blob_info->base_info->idx, bytes_used);
3573 return rc;
3574 }
3575
3576 total_used_bytes += bytes_used;
3577 }
3578
3579 if (total_used_bytes) {
3580 /* Update the HW entries */
3581 num_ent = prepare->num_hw_update_entries;
3582 prepare->hw_update_entries[num_ent].handle =
3583 kmd_buf_info->handle;
3584 prepare->hw_update_entries[num_ent].len = total_used_bytes;
3585 prepare->hw_update_entries[num_ent].offset =
3586 kmd_buf_info->offset;
3587 num_ent++;
3588
3589 kmd_buf_info->used_bytes += total_used_bytes;
3590 kmd_buf_info->offset += total_used_bytes;
3591 prepare->num_hw_update_entries = num_ent;
3592 }
3593
3594 return rc;
3595}
3596
3597static int cam_isp_blob_csid_clock_update(
3598 uint32_t blob_type,
3599 struct cam_isp_generic_blob_info *blob_info,
3600 struct cam_isp_csid_clock_config *clock_config,
3601 struct cam_hw_prepare_update_args *prepare)
3602{
3603 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3604 struct cam_ife_hw_mgr_res *hw_mgr_res;
3605 struct cam_hw_intf *hw_intf;
3606 struct cam_ife_csid_clock_update_args csid_clock_upd_args;
3607 uint64_t clk_rate = 0;
3608 int rc = -EINVAL;
3609 uint32_t i;
3610
3611 ctx = prepare->ctxt_to_hw_map;
3612
3613 CAM_DBG(CAM_ISP,
3614 "csid clk=%llu", clock_config->csid_clock);
3615
3616 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
3617 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3618 clk_rate = 0;
3619 if (!hw_mgr_res->hw_res[i])
3620 continue;
3621 clk_rate = clock_config->csid_clock;
3622 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
3623 if (hw_intf && hw_intf->hw_ops.process_cmd) {
3624 csid_clock_upd_args.clk_rate = clk_rate;
3625 CAM_DBG(CAM_ISP, "i= %d clk=%llu\n",
3626 i, csid_clock_upd_args.clk_rate);
3627
3628 rc = hw_intf->hw_ops.process_cmd(
3629 hw_intf->hw_priv,
3630 blob_type_hw_cmd_map[blob_type],
3631 &csid_clock_upd_args,
3632 sizeof(
3633 struct cam_ife_csid_clock_update_args));
3634 if (rc)
3635 CAM_ERR(CAM_ISP, "Clock Update failed");
3636 } else
3637 CAM_ERR(CAM_ISP, "NULL hw_intf!");
3638 }
3639 }
3640
3641 return rc;
3642}
3643
/*
 * cam_isp_blob_clock_update - program per-resource VFE clock rates.
 * @blob_type:    generic blob type (unused here)
 * @blob_info:    parsed blob context (unused here)
 * @clock_config: usage type, left/right pixel clocks and per-RDI clocks
 * @prepare:      prepare-update args; ctxt_to_hw_map gives the IFE ctx
 *
 * For each split of each res_list_ife_src resource, picks a clock rate:
 *  - CAMIF / CAMIF_LITE: left_pix_hz on the left split, right_pix_hz
 *    otherwise, each applied at most once (dedupe flags below);
 *  - RD..RDI3 range: the max of all requested rdi_hz values;
 *  - anything else with a live hw_res is treated as an error.
 * The chosen rate is then sent via CAM_ISP_HW_CMD_CLOCK_UPDATE.
 *
 * Return: rc of the last command issued, -EINVAL on invalid res_id or if
 * no command was issued.
 */
static int cam_isp_blob_clock_update(
	uint32_t                               blob_type,
	struct cam_isp_generic_blob_info      *blob_info,
	struct cam_isp_clock_config           *clock_config,
	struct cam_hw_prepare_update_args     *prepare)
{
	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
	struct cam_ife_hw_mgr_res             *hw_mgr_res;
	struct cam_hw_intf                    *hw_intf;
	struct cam_vfe_clock_update_args       clock_upd_args;
	uint64_t                               clk_rate = 0;
	int                                    rc = -EINVAL;
	uint32_t                               i;
	uint32_t                               j;
	/* ensure each pixel clock is applied only once across resources */
	bool                                   camif_l_clk_updated = false;
	bool                                   camif_r_clk_updated = false;

	ctx = prepare->ctxt_to_hw_map;

	CAM_DBG(CAM_PERF,
		"usage=%u left_clk= %lu right_clk=%lu",
		clock_config->usage_type,
		clock_config->left_pix_hz,
		clock_config->right_pix_hz);

	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			clk_rate = 0;
			if (!hw_mgr_res->hw_res[i])
				continue;

			if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
				if (i == CAM_ISP_HW_SPLIT_LEFT) {
					if (camif_l_clk_updated)
						continue;

					clk_rate =
						clock_config->left_pix_hz;

					camif_l_clk_updated = true;
				} else {
					if (camif_r_clk_updated)
						continue;

					clk_rate =
						clock_config->right_pix_hz;

					camif_r_clk_updated = true;
				}
			} else if (hw_mgr_res->res_id ==
				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
				/* CAMIF_LITE shares the same dedupe flags */
				if (i == CAM_ISP_HW_SPLIT_LEFT) {
					if (camif_l_clk_updated)
						continue;

					clk_rate =
						clock_config->left_pix_hz;

					camif_l_clk_updated = true;
				} else {
					if (camif_r_clk_updated)
						continue;

					clk_rate =
						clock_config->right_pix_hz;

					camif_r_clk_updated = true;
				}
			} else if ((hw_mgr_res->res_id >=
				CAM_ISP_HW_VFE_IN_RD) && (hw_mgr_res->res_id
				<= CAM_ISP_HW_VFE_IN_RDI3))
				/* RD/RDI paths all run at the max RDI rate */
				for (j = 0; j < clock_config->num_rdi; j++)
					clk_rate = max(clock_config->rdi_hz[j],
						clk_rate);
			else
				/*
				 * NOTE(review): hw_res[i] is already known
				 * non-NULL here (checked above), so this
				 * inner test is redundant — kept as-is.
				 */
				if (hw_mgr_res->hw_res[i]) {
					CAM_ERR(CAM_ISP, "Invalid res_id %u",
						hw_mgr_res->res_id);
					rc = -EINVAL;
					return rc;
				}

			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			if (hw_intf && hw_intf->hw_ops.process_cmd) {
				clock_upd_args.node_res =
					hw_mgr_res->hw_res[i];
				CAM_DBG(CAM_ISP,
				"res_id=%u i= %d clk=%llu\n",
				hw_mgr_res->res_id, i, clk_rate);

				clock_upd_args.clk_rate = clk_rate;

				rc = hw_intf->hw_ops.process_cmd(
					hw_intf->hw_priv,
					CAM_ISP_HW_CMD_CLOCK_UPDATE,
					&clock_upd_args,
					sizeof(
					struct cam_vfe_clock_update_args));
				if (rc)
					CAM_ERR(CAM_ISP, "Clock Update failed");
			} else
				CAM_WARN(CAM_ISP, "NULL hw_intf!");
		}
	}

	return rc;
}
3751
Raja Mallike3ed1a32019-08-22 17:12:32 +05303752static int cam_isp_blob_sensor_config(
3753 uint32_t blob_type,
3754 struct cam_isp_generic_blob_info *blob_info,
3755 struct cam_isp_sensor_config *dim_config,
3756 struct cam_hw_prepare_update_args *prepare)
3757{
3758 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3759 struct cam_ife_hw_mgr_res *hw_mgr_res;
3760 struct cam_hw_intf *hw_intf;
3761 struct cam_ife_sensor_dimension_update_args update_args;
3762 int rc = -EINVAL, found = 0;
3763 uint32_t i, j;
3764 struct cam_isp_sensor_dimension *path_config;
3765
3766 ctx = prepare->ctxt_to_hw_map;
3767
3768 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
3769 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3770 if (!hw_mgr_res->hw_res[i])
3771 continue;
3772 found = 1;
3773 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
3774 if (hw_intf && hw_intf->hw_ops.process_cmd) {
3775 path_config = &(dim_config->ipp_path);
3776 update_args.ipp_path.width =
3777 path_config->width;
3778 update_args.ipp_path.height =
3779 path_config->height;
3780 update_args.ipp_path.measure_enabled =
3781 path_config->measure_enabled;
3782 path_config = &(dim_config->ppp_path);
3783 update_args.ppp_path.width =
3784 path_config->width;
3785 update_args.ppp_path.height =
3786 path_config->height;
3787 update_args.ppp_path.measure_enabled =
3788 path_config->measure_enabled;
3789 for (j = 0; j < CAM_IFE_RDI_NUM_MAX; j++) {
3790 path_config =
3791 &(dim_config->rdi_path[j]);
3792 update_args.rdi_path[j].width =
3793 path_config->width;
3794 update_args.rdi_path[j].height =
3795 path_config->height;
3796 update_args.rdi_path[j].measure_enabled =
3797 path_config->measure_enabled;
3798 }
3799 rc = hw_intf->hw_ops.process_cmd(
3800 hw_intf->hw_priv,
3801 CAM_IFE_CSID_SET_SENSOR_DIMENSION_CFG,
3802 &update_args,
3803 sizeof(
3804 struct
3805 cam_ife_sensor_dimension_update_args)
3806 );
3807 if (rc)
3808 CAM_ERR(CAM_ISP,
3809 "Dimension Update failed");
3810 } else
3811 CAM_ERR(CAM_ISP, "hw_intf is NULL");
3812 }
3813 if (found)
3814 break;
3815 }
3816
3817 return rc;
3818}
3819
3820
Raja Mallikd268c822019-02-18 13:50:39 +05303821void fill_res_bitmap(uint32_t resource_type, unsigned long *res_bitmap)
3822{
3823
3824 switch (resource_type) {
3825 case CAM_ISP_IFE_OUT_RES_FULL:
3826 case CAM_ISP_IFE_OUT_RES_DS4:
3827 case CAM_ISP_IFE_OUT_RES_DS16:
3828 case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
3829 case CAM_ISP_IFE_OUT_RES_FD:
3830 case CAM_ISP_IFE_OUT_RES_PDAF:
3831 case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
3832 case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
3833 case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
3834 case CAM_ISP_IFE_OUT_RES_STATS_BF:
3835 case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
3836 case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
3837 case CAM_ISP_IFE_OUT_RES_STATS_RS:
3838 case CAM_ISP_IFE_OUT_RES_STATS_CS:
3839 case CAM_ISP_IFE_OUT_RES_STATS_IHIST:
3840 case CAM_ISP_IFE_OUT_RES_FULL_DISP:
3841 case CAM_ISP_IFE_OUT_RES_DS4_DISP:
3842 case CAM_ISP_IFE_OUT_RES_DS16_DISP:
3843 case CAM_ISP_IFE_IN_RES_RD:
3844 set_bit(CAM_IFE_REG_UPD_CMD_PIX_BIT, res_bitmap);
3845 break;
3846 case CAM_ISP_IFE_OUT_RES_RDI_0:
3847 set_bit(CAM_IFE_REG_UPD_CMD_RDI0_BIT, res_bitmap);
3848 break;
3849 case CAM_ISP_IFE_OUT_RES_RDI_1:
3850 set_bit(CAM_IFE_REG_UPD_CMD_RDI1_BIT, res_bitmap);
3851 break;
3852 case CAM_ISP_IFE_OUT_RES_RDI_2:
3853 set_bit(CAM_IFE_REG_UPD_CMD_RDI2_BIT, res_bitmap);
3854 break;
3855 case CAM_ISP_IFE_OUT_RES_RDI_3:
3856 set_bit(CAM_IFE_REG_UPD_CMD_RDI3_BIT, res_bitmap);
3857 break;
3858 case CAM_ISP_IFE_OUT_RES_2PD:
3859 set_bit(CAM_IFE_REG_UPD_CMD_DUAL_PD_BIT,
3860 res_bitmap);
3861 break;
3862 default:
3863 CAM_ERR(CAM_ISP, "Invalid resource");
3864 break;
3865 }
3866}
3867
Raja Mallik477ca242019-06-21 14:23:32 +05303868static int cam_isp_blob_init_frame_drop(
3869 struct cam_isp_init_frame_drop_config *frame_drop_cfg,
3870 struct cam_hw_prepare_update_args *prepare)
3871{
3872 struct cam_ife_hw_mgr_ctx *ctx = NULL;
3873 struct cam_ife_hw_mgr_res *hw_mgr_res;
3874 struct cam_hw_intf *hw_intf;
3875 uint32_t hw_idx = UINT_MAX;
3876 uint32_t i;
3877 int rc = 0;
3878
3879 ctx = prepare->ctxt_to_hw_map;
3880 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
3881 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
3882 if (!hw_mgr_res->hw_res[i])
3883 continue;
3884
3885 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
3886 if (hw_intf->hw_idx == hw_idx)
3887 continue;
3888
3889 rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
3890 CAM_IFE_CSID_SET_INIT_FRAME_DROP,
3891 frame_drop_cfg,
3892 sizeof(
3893 struct cam_isp_init_frame_drop_config *));
3894 hw_idx = hw_intf->hw_idx;
3895 }
3896 }
3897 return rc;
3898}
3899
Raja Mallikc7e256f2018-12-06 17:36:28 +05303900static int cam_isp_packet_generic_blob_handler(void *user_data,
3901 uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
3902{
3903 int rc = 0;
3904 struct cam_isp_generic_blob_info *blob_info = user_data;
3905 struct cam_hw_prepare_update_args *prepare = NULL;
3906
3907 if (!blob_data || (blob_size == 0) || !blob_info) {
Raja Mallikf2269d02019-03-15 18:30:01 +05303908 CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK",
3909 blob_data, blob_size, blob_info);
Raja Mallikc7e256f2018-12-06 17:36:28 +05303910 return -EINVAL;
3911 }
3912
Raja Mallikc7e256f2018-12-06 17:36:28 +05303913 prepare = blob_info->prepare;
3914 if (!prepare) {
3915 CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
3916 blob_type);
3917 return -EINVAL;
3918 }
3919
3920 switch (blob_type) {
3921 case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
Raja Mallikf2269d02019-03-15 18:30:01 +05303922 struct cam_isp_resource_hfr_config *hfr_config;
3923
3924 if (blob_size < sizeof(struct cam_isp_resource_hfr_config)) {
3925 CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
3926 return -EINVAL;
3927 }
3928
3929 hfr_config = (struct cam_isp_resource_hfr_config *)blob_data;
3930
3931 if (hfr_config->num_ports > CAM_ISP_IFE_OUT_RES_MAX) {
3932 CAM_ERR(CAM_ISP, "Invalid num_ports %u in hfr config",
3933 hfr_config->num_ports);
3934 return -EINVAL;
3935 }
3936
3937 if (blob_size < (sizeof(uint32_t) * 2 + hfr_config->num_ports *
3938 sizeof(struct cam_isp_port_hfr_config))) {
3939 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
3940 blob_size, sizeof(uint32_t) * 2 +
3941 sizeof(struct cam_isp_port_hfr_config) *
3942 hfr_config->num_ports);
3943 return -EINVAL;
3944 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05303945
3946 rc = cam_isp_blob_hfr_update(blob_type, blob_info,
3947 hfr_config, prepare);
3948 if (rc)
3949 CAM_ERR(CAM_ISP, "HFR Update Failed");
3950 }
3951 break;
3952 case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
Raja Mallikf2269d02019-03-15 18:30:01 +05303953 struct cam_isp_clock_config *clock_config;
3954
3955 if (blob_size < sizeof(struct cam_isp_clock_config)) {
3956 CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
3957 return -EINVAL;
3958 }
3959
3960 clock_config = (struct cam_isp_clock_config *)blob_data;
3961
3962 if (clock_config->num_rdi > CAM_IFE_RDI_NUM_MAX) {
3963 CAM_ERR(CAM_ISP, "Invalid num_rdi %u in clock config",
3964 clock_config->num_rdi);
3965 return -EINVAL;
3966 }
3967
3968 if (blob_size < (sizeof(uint32_t) * 2 + sizeof(uint64_t) *
3969 (clock_config->num_rdi + 2))) {
3970 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
3971 blob_size,
3972 sizeof(uint32_t) * 2 + sizeof(uint64_t) *
3973 (clock_config->num_rdi + 2));
3974 return -EINVAL;
3975 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05303976
3977 rc = cam_isp_blob_clock_update(blob_type, blob_info,
3978 clock_config, prepare);
3979 if (rc)
3980 CAM_ERR(CAM_ISP, "Clock Update Failed");
3981 }
3982 break;
3983 case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
Raja Mallikf2269d02019-03-15 18:30:01 +05303984 struct cam_isp_bw_config *bw_config;
Raja Mallikc7e256f2018-12-06 17:36:28 +05303985 struct cam_isp_prepare_hw_update_data *prepare_hw_data;
3986
Raja Mallikf2269d02019-03-15 18:30:01 +05303987 if (blob_size < sizeof(struct cam_isp_bw_config)) {
3988 CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
3989 return -EINVAL;
3990 }
3991
3992 bw_config = (struct cam_isp_bw_config *)blob_data;
3993
3994 if (bw_config->num_rdi > CAM_IFE_RDI_NUM_MAX) {
3995 CAM_ERR(CAM_ISP, "Invalid num_rdi %u in bw config",
3996 bw_config->num_rdi);
3997 return -EINVAL;
3998 }
3999
4000 if (blob_size < (sizeof(uint32_t) * 2 + (bw_config->num_rdi + 2)
4001 * sizeof(struct cam_isp_bw_vote))) {
4002 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
4003 blob_size,
4004 sizeof(uint32_t) * 2 + (bw_config->num_rdi + 2)
4005 * sizeof(struct cam_isp_bw_vote));
4006 return -EINVAL;
4007 }
4008
Raja Mallikc7e256f2018-12-06 17:36:28 +05304009 if (!prepare || !prepare->priv ||
4010 (bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
4011 CAM_ERR(CAM_ISP, "Invalid inputs");
4012 rc = -EINVAL;
4013 break;
4014 }
4015
4016 prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
4017 prepare->priv;
Raja Mallikc7e256f2018-12-06 17:36:28 +05304018 memcpy(&prepare_hw_data->bw_config[bw_config->usage_type],
4019 bw_config, sizeof(prepare_hw_data->bw_config[0]));
Raja Mallik8b88b232019-04-04 14:32:27 +05304020 memset(&prepare_hw_data->bw_config_ab[bw_config->usage_type],
4021 0, sizeof(prepare_hw_data->bw_config_ab[0]));
Raja Mallikc7e256f2018-12-06 17:36:28 +05304022 prepare_hw_data->bw_config_valid[bw_config->usage_type] = true;
4023
4024 }
4025 break;
Raja Mallik8b88b232019-04-04 14:32:27 +05304026 case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2: {
Raja Mallik97148192019-04-09 11:57:28 +05304027 struct cam_isp_bw_config_ab *bw_config_ab;
4028
Raja Mallik8b88b232019-04-04 14:32:27 +05304029 struct cam_isp_prepare_hw_update_data *prepare_hw_data;
4030
Raja Mallik97148192019-04-09 11:57:28 +05304031 if (blob_size < sizeof(struct cam_isp_bw_config_ab)) {
4032 CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
4033 return -EINVAL;
4034 }
4035
4036 bw_config_ab = (struct cam_isp_bw_config_ab *)blob_data;
4037
4038 if (bw_config_ab->num_rdi > CAM_IFE_RDI_NUM_MAX) {
4039 CAM_ERR(CAM_ISP, "Invalid num_rdi %u in bw config ab",
4040 bw_config_ab->num_rdi);
4041 return -EINVAL;
4042 }
4043
4044 if (blob_size < (sizeof(uint32_t) * 2
4045 + (bw_config_ab->num_rdi + 2)
4046 * sizeof(struct cam_isp_bw_vote))) {
4047 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
4048 blob_size,
4049 sizeof(uint32_t) * 2
4050 + (bw_config_ab->num_rdi + 2)
4051 * sizeof(struct cam_isp_bw_vote));
4052 return -EINVAL;
4053 }
Raja Mallik8b88b232019-04-04 14:32:27 +05304054 CAM_DBG(CAM_ISP, "AB L:%lld R:%lld usage_type %d",
4055 bw_config_ab->left_pix_vote_ab,
4056 bw_config_ab->right_pix_vote_ab,
4057 bw_config_ab->usage_type);
4058
4059 if (!prepare || !prepare->priv ||
4060 (bw_config_ab->usage_type >= CAM_IFE_HW_NUM_MAX)) {
4061 CAM_ERR(CAM_ISP, "Invalid inputs");
4062 rc = -EINVAL;
4063 break;
4064 }
4065 prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
4066 prepare->priv;
4067
4068 memcpy(&prepare_hw_data->bw_config_ab[bw_config_ab->usage_type],
4069 bw_config_ab, sizeof(prepare_hw_data->bw_config_ab[0]));
4070 }
4071 break;
Raja Mallikc7e256f2018-12-06 17:36:28 +05304072 case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: {
Raja Mallikf2269d02019-03-15 18:30:01 +05304073 struct cam_ubwc_config *ubwc_config;
4074
4075 if (blob_size < sizeof(struct cam_ubwc_config)) {
4076 CAM_ERR(CAM_ISP, "Invalid blob_size %u", blob_size);
4077 return -EINVAL;
4078 }
4079
4080 ubwc_config = (struct cam_ubwc_config *)blob_data;
4081
4082 if (ubwc_config->num_ports > CAM_ISP_IFE_OUT_RES_MAX) {
4083 CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config",
4084 ubwc_config->num_ports);
4085 return -EINVAL;
4086 }
4087
4088 if (blob_size < (sizeof(uint32_t) * 2 + ubwc_config->num_ports *
4089 sizeof(struct cam_ubwc_plane_cfg_v1) * 2)) {
4090 CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %lu",
4091 blob_size,
4092 sizeof(uint32_t) * 2 + ubwc_config->num_ports *
4093 sizeof(struct cam_ubwc_plane_cfg_v1) * 2);
4094 return -EINVAL;
4095 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05304096
4097 rc = cam_isp_blob_ubwc_update(blob_type, blob_info,
4098 ubwc_config, prepare);
4099 if (rc)
4100 CAM_ERR(CAM_ISP, "UBWC Update Failed rc: %d", rc);
4101 }
4102 break;
4103 case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: {
Raja Mallikf2269d02019-03-15 18:30:01 +05304104 struct cam_isp_csid_clock_config *clock_config;
4105
4106 if (blob_size < sizeof(struct cam_isp_csid_clock_config)) {
4107 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
4108 blob_size,
4109 sizeof(struct cam_isp_csid_clock_config));
4110 return -EINVAL;
4111 }
4112
4113 clock_config = (struct cam_isp_csid_clock_config *)blob_data;
Raja Mallikc7e256f2018-12-06 17:36:28 +05304114
4115 rc = cam_isp_blob_csid_clock_update(blob_type, blob_info,
4116 clock_config, prepare);
4117 if (rc)
4118 CAM_ERR(CAM_ISP, "Clock Update Failed");
4119 }
4120 break;
Raja Mallikfe46d932019-02-12 20:34:07 +05304121 case CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG: {
Raja Mallikf2269d02019-03-15 18:30:01 +05304122 struct cam_fe_config *fe_config;
4123
4124 if (blob_size < sizeof(struct cam_fe_config)) {
4125 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
4126 blob_size, sizeof(struct cam_fe_config));
4127 return -EINVAL;
4128 }
4129
4130 fe_config = (struct cam_fe_config *)blob_data;
4131
Raja Mallikfe46d932019-02-12 20:34:07 +05304132 rc = cam_isp_blob_fe_update(blob_type, blob_info,
4133 fe_config, prepare);
4134 if (rc)
4135 CAM_ERR(CAM_ISP, "FS Update Failed rc: %d", rc);
4136 }
4137 break;
Raja Mallik477ca242019-06-21 14:23:32 +05304138 case CAM_ISP_GENERIC_BLOB_TYPE_INIT_FRAME_DROP: {
4139 struct cam_isp_init_frame_drop_config *frame_drop_cfg =
4140 (struct cam_isp_init_frame_drop_config *)blob_data;
Raja Mallikff6c75b2019-01-29 16:52:37 +05304141
Raja Mallik477ca242019-06-21 14:23:32 +05304142 if (blob_size < sizeof(struct cam_isp_init_frame_drop_config)) {
4143 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
4144 blob_size,
4145 sizeof(struct cam_isp_init_frame_drop_config));
4146 return -EINVAL;
4147 }
4148
4149 rc = cam_isp_blob_init_frame_drop(frame_drop_cfg, prepare);
4150 if (rc)
4151 CAM_ERR(CAM_ISP, "Init Frame drop Update Failed");
4152 }
4153 break;
Raja Mallike3ed1a32019-08-22 17:12:32 +05304154 case CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_DIMENSION_CONFIG: {
4155 struct cam_isp_sensor_config *csid_dim_config;
4156
4157 if (blob_size < sizeof(struct cam_isp_sensor_config)) {
4158 CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu",
4159 blob_size,
4160 sizeof(struct cam_isp_sensor_config));
4161 return -EINVAL;
4162 }
4163
4164 csid_dim_config =
4165 (struct cam_isp_sensor_config *)blob_data;
4166
4167 rc = cam_isp_blob_sensor_config(blob_type, blob_info,
4168 csid_dim_config, prepare);
4169 if (rc)
4170 CAM_ERR(CAM_ISP,
4171 "Sensor Dimension Update Failed rc: %d", rc);
4172 }
4173 break;
4174 case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG: {
4175 struct cam_fps_config *fps_config;
4176 struct cam_isp_prepare_hw_update_data *prepare_hw_data;
4177
4178 if (blob_size < sizeof(struct cam_fps_config)) {
4179 CAM_ERR(CAM_ISP,
4180 "Invalid fps blob size %u expected %lu",
4181 blob_size, sizeof(struct cam_fps_config));
4182 return -EINVAL;
4183 }
4184
4185 fps_config = (struct cam_fps_config *)blob_data;
4186 prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
4187 prepare->priv;
4188
4189 prepare_hw_data->fps = fps_config->fps;
4190
4191 rc = cam_isp_blob_fps_config(blob_type, blob_info,
4192 fps_config, prepare);
4193 if (rc)
4194 CAM_ERR(CAM_ISP, "FPS Update Failed rc: %d", rc);
4195
4196 }
4197 break;
Raja Mallikc7e256f2018-12-06 17:36:28 +05304198 default:
4199 CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
4200 break;
4201 }
4202
4203 return rc;
4204}
4205
/*
 * Prepare a UMD packet for HW consumption.
 *
 * Validates the packet, patches command-buffer mem handles with mapped
 * iova addresses, then for every base (VFE core) of the context builds
 * the ordered hw-update entry list: change-base, command buffers (with
 * generic blobs routed through cam_isp_packet_generic_blob_handler),
 * and IO buffers.  For UPDATE packets a second pass appends
 * change-base + reg-update commands; INIT packets skip that pass
 * (reg update is applied later at stream start).
 *
 * @hw_mgr_priv:            struct cam_ife_hw_mgr pointer
 * @prepare_hw_update_args: struct cam_hw_prepare_update_args pointer
 *
 * Returns 0 on success or a negative error code on any failure.
 */
static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
	void *prepare_hw_update_args)
{
	int rc = 0;
	struct cam_hw_prepare_update_args *prepare =
		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
	struct cam_ife_hw_mgr_ctx *ctx;
	struct cam_ife_hw_mgr *hw_mgr;
	struct cam_kmd_buf_info kmd_buf;
	uint32_t i;
	bool fill_fence = true;
	struct cam_isp_prepare_hw_update_data *prepare_hw_data;

	if (!hw_mgr_priv || !prepare_hw_update_args) {
		CAM_ERR(CAM_ISP, "Invalid args");
		return -EINVAL;
	}

	CAM_DBG(CAM_REQ, "Enter for req_id %lld",
		prepare->packet->header.request_id);

	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
		prepare->priv;

	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
	hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;

	/* Reject malformed packets before any offsets are trusted */
	rc = cam_packet_util_validate_packet(prepare->packet,
		prepare->remain_len);
	if (rc)
		return rc;

	/* Pre parse the packet*/
	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
	if (rc)
		return rc;

	/* Patch mem handles in cmd buffers with device-mapped addresses */
	rc = cam_packet_util_process_patches(prepare->packet,
		hw_mgr->mgr_common.cmd_iommu_hdl,
		hw_mgr->mgr_common.cmd_iommu_hdl_secure,
		0);
	if (rc) {
		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
		return rc;
	}

	prepare->num_hw_update_entries = 0;
	prepare->num_in_map_entries = 0;
	prepare->num_out_map_entries = 0;

	/* Clear stale per-core BW config before blobs repopulate it */
	memset(&prepare_hw_data->bw_config[0], 0x0,
		sizeof(prepare_hw_data->bw_config[0]) *
		CAM_IFE_HW_NUM_MAX);
	memset(&prepare_hw_data->bw_config_valid[0], 0x0,
		sizeof(prepare_hw_data->bw_config_valid[0]) *
		CAM_IFE_HW_NUM_MAX);

	for (i = 0; i < ctx->num_base; i++) {
		CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);

		/* Add change base */
		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
			ctx->base[i].idx, &kmd_buf);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Failed in change base i=%d, idx=%d, rc=%d",
				i, ctx->base[i].idx, rc);
			goto end;
		}


		/* get command buffers */
		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
			rc = cam_isp_add_command_buffers(prepare, &kmd_buf,
				&ctx->base[i],
				cam_isp_packet_generic_blob_handler,
				ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
					i, ctx->base[i].split_id, rc);
				goto end;
			}
		}

		/* get IO buffers */
		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
			hw_mgr->mgr_common.img_iommu_hdl_secure,
			prepare, ctx->base[i].idx,
			&kmd_buf, ctx->res_list_ife_out,
			&ctx->res_list_ife_in_rd,
			CAM_IFE_HW_OUT_RES_MAX, fill_fence,
			&ctx->res_bitmap,
			fill_res_bitmap);

		if (rc) {
			CAM_ERR(CAM_ISP,
				"Failed in io buffers, i=%d, rc=%d",
				i, rc);
			goto end;
		}

		/* fence map table entries need to fill only once in the loop */
		if (fill_fence)
			fill_fence = false;
	}

	/*
	 * reg update will be done later for the initial configure.
	 * need to plus one to the op_code and only take the lower
	 * bits to get the type of operation since UMD definition
	 * of op_code has some difference from KMD.
	 */
	if (((prepare->packet->header.op_code + 1) & 0xF) ==
		CAM_ISP_PACKET_INIT_DEV) {
		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_INIT_DEV;
		goto end;
	} else
		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;

	/* add reg update commands */
	for (i = 0; i < ctx->num_base; i++) {
		/* Add change base */
		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
			ctx->base[i].idx, &kmd_buf);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Failed in change base adding reg_update cmd i=%d, idx=%d, rc=%d",
				i, ctx->base[i].idx, rc);
			goto end;
		}

		/*Add reg update */
		rc = cam_isp_add_reg_update(prepare, &ctx->res_list_ife_src,
			ctx->base[i].idx, &kmd_buf, ctx->is_fe_enable,
			ctx->res_bitmap);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Add Reg_update cmd Failed i=%d, idx=%d, rc=%d",
				i, ctx->base[i].idx, rc);
			goto end;
		}
	}
	/* Bitmap consumed by reg-update generation; reset for next request */
	ctx->res_bitmap = 0;

end:
	return rc;
}
4354
Raja Mallikc7e256f2018-12-06 17:36:28 +05304355
4356static int cam_ife_mgr_sof_irq_debug(
4357 struct cam_ife_hw_mgr_ctx *ctx,
4358 uint32_t sof_irq_enable)
4359{
4360 int rc = 0;
4361 uint32_t i = 0;
4362 struct cam_ife_hw_mgr_res *hw_mgr_res = NULL;
4363 struct cam_hw_intf *hw_intf = NULL;
4364 struct cam_isp_resource_node *rsrc_node = NULL;
4365
4366 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
4367 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
4368 if (!hw_mgr_res->hw_res[i])
4369 continue;
4370
4371 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
4372 if (hw_intf->hw_ops.process_cmd) {
4373 rc |= hw_intf->hw_ops.process_cmd(
4374 hw_intf->hw_priv,
4375 CAM_IFE_CSID_SOF_IRQ_DEBUG,
4376 &sof_irq_enable,
4377 sizeof(sof_irq_enable));
4378 }
4379 }
4380 }
4381
4382 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
4383 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
4384 if (!hw_mgr_res->hw_res[i])
4385 continue;
4386
4387 rsrc_node = hw_mgr_res->hw_res[i];
4388 if (rsrc_node->process_cmd && (rsrc_node->res_id ==
4389 CAM_ISP_HW_VFE_IN_CAMIF)) {
4390 rc |= hw_mgr_res->hw_res[i]->process_cmd(
4391 hw_mgr_res->hw_res[i],
4392 CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
4393 &sof_irq_enable,
4394 sizeof(sof_irq_enable));
4395 }
4396 }
4397 }
4398
4399 return rc;
4400}
4401
/*
 * Page-fault helper: walk every IO config in @packet and log each
 * plane's mapped iova range so the faulting address can be correlated.
 * When a plane's fd matches @pf_buf_info (the buffer the SMMU reported
 * faulting), *mem_found is set to true.
 *
 * @packet:      UMD packet whose IO configs are dumped
 * @iommu_hdl:   non-secure image iommu handle
 * @sec_mmu_hdl: secure image iommu handle
 * @pf_buf_info: mem handle of the faulting buffer (0 to skip matching)
 * @mem_found:   optional out flag, set true on an fd match
 */
static void cam_ife_mgr_print_io_bufs(struct cam_packet *packet,
	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
	bool *mem_found)
{
	uint64_t iova_addr;
	size_t src_buf_size;
	int i;
	int j;
	int rc = 0;
	int32_t mmu_hdl;

	struct cam_buf_io_cfg *io_cfg = NULL;

	if (mem_found)
		*mem_found = false;

	/* io_configs_offset is in bytes; pointer math is on uint32_t */
	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
		packet->io_configs_offset / 4);

	for (i = 0; i < packet->num_io_configs; i++) {
		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
			/* Stop at the first unused plane slot */
			if (!io_cfg[i].mem_handle[j]) {
				CAM_ERR(CAM_ISP,
					"Mem Handle %d is NULL for %d io config",
					j, i);
				break;
			}

			/* Same backing fd as the faulting buffer => culprit */
			if (pf_buf_info &&
				GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
				GET_FD_FROM_HANDLE(pf_buf_info)) {
				CAM_INFO(CAM_ISP,
					"Found PF at port: 0x%x mem 0x%x fd: 0x%x",
					io_cfg[i].resource_type,
					io_cfg[i].mem_handle[j],
					pf_buf_info);
				if (mem_found)
					*mem_found = true;
			}

			CAM_INFO(CAM_ISP, "port: 0x%x f: %u format: %d dir %d",
				io_cfg[i].resource_type,
				io_cfg[i].fence,
				io_cfg[i].format,
				io_cfg[i].direction);

			/* Secure buffers are mapped via the secure iommu */
			mmu_hdl = cam_mem_is_secure_buf(
				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
				iommu_hdl;
			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
				mmu_hdl, &iova_addr, &src_buf_size);
			if (rc < 0) {
				CAM_ERR(CAM_ISP,
					"get src buf address fail mem_handle 0x%x",
					io_cfg[i].mem_handle[j]);
				continue;
			}
			/* Only 32-bit device addresses are expected here */
			if (iova_addr >> 32) {
				CAM_ERR(CAM_ISP, "Invalid mapped address");
				rc = -EINVAL;
				continue;
			}

			CAM_INFO(CAM_ISP,
				"pln %d w %d h %d s %u size 0x%x addr 0x%x end_addr 0x%x offset %x memh %x",
				j, io_cfg[i].planes[j].width,
				io_cfg[i].planes[j].height,
				io_cfg[i].planes[j].plane_stride,
				(unsigned int)src_buf_size,
				(unsigned int)iova_addr,
				(unsigned int)iova_addr +
				(unsigned int)src_buf_size,
				io_cfg[i].offsets[j],
				io_cfg[i].mem_handle[j]);
		}
	}
}
4479
Raja Mallik477ca242019-06-21 14:23:32 +05304480static void cam_ife_mgr_ctx_irq_dump(struct cam_ife_hw_mgr_ctx *ctx)
4481{
4482 struct cam_ife_hw_mgr_res *hw_mgr_res;
4483 struct cam_hw_intf *hw_intf;
4484 struct cam_isp_hw_get_cmd_update cmd_update;
4485 int i = 0;
4486
4487 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
4488 if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
4489 continue;
4490 for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
4491 if (!hw_mgr_res->hw_res[i])
4492 continue;
4493 switch (hw_mgr_res->hw_res[i]->res_id) {
4494 case CAM_ISP_HW_VFE_IN_CAMIF:
4495 hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
4496 cmd_update.res = hw_mgr_res->hw_res[i];
4497 cmd_update.cmd_type =
4498 CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP;
4499 hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
4500 CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP,
4501 &cmd_update, sizeof(cmd_update));
4502 break;
4503 default:
4504 break;
4505 }
4506 }
4507 }
4508}
4509
Raja Mallikc7e256f2018-12-06 17:36:28 +05304510static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
4511{
4512 int rc = 0;
4513 struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
4514 struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
4515 struct cam_ife_hw_mgr_ctx *ctx = (struct cam_ife_hw_mgr_ctx *)
4516 hw_cmd_args->ctxt_to_hw_map;
4517 struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL;
4518
4519 if (!hw_mgr_priv || !cmd_args) {
4520 CAM_ERR(CAM_ISP, "Invalid arguments");
4521 return -EINVAL;
4522 }
4523
4524 if (!ctx || !ctx->ctx_in_use) {
4525 CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
4526 return -EPERM;
4527 }
4528
4529 switch (hw_cmd_args->cmd_type) {
4530 case CAM_HW_MGR_CMD_INTERNAL:
4531 if (!hw_cmd_args->u.internal_args) {
4532 CAM_ERR(CAM_ISP, "Invalid cmd arguments");
4533 return -EINVAL;
4534 }
4535
4536 isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *)
4537 hw_cmd_args->u.internal_args;
4538
4539 switch (isp_hw_cmd_args->cmd_type) {
Raja Mallikc7e256f2018-12-06 17:36:28 +05304540 case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
4541 cam_ife_mgr_pause_hw(ctx);
4542 break;
4543 case CAM_ISP_HW_MGR_CMD_RESUME_HW:
4544 cam_ife_mgr_resume_hw(ctx);
4545 break;
4546 case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
4547 cam_ife_mgr_sof_irq_debug(ctx,
4548 isp_hw_cmd_args->u.sof_irq_enable);
4549 break;
Raja Mallikff6c75b2019-01-29 16:52:37 +05304550 case CAM_ISP_HW_MGR_CMD_CTX_TYPE:
4551 if (ctx->is_fe_enable)
4552 isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_FS2;
4553 else if (ctx->is_rdi_only_context)
4554 isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_RDI;
4555 else
4556 isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_PIX;
4557 break;
Raja Mallikc7e256f2018-12-06 17:36:28 +05304558 default:
4559 CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
4560 hw_cmd_args->cmd_type);
4561 rc = -EINVAL;
4562 break;
4563 }
4564 break;
4565 case CAM_HW_MGR_CMD_DUMP_PF_INFO:
4566 cam_ife_mgr_print_io_bufs(
4567 hw_cmd_args->u.pf_args.pf_data.packet,
4568 hw_mgr->mgr_common.img_iommu_hdl,
4569 hw_mgr->mgr_common.img_iommu_hdl_secure,
4570 hw_cmd_args->u.pf_args.buf_info,
4571 hw_cmd_args->u.pf_args.mem_found);
4572 break;
4573 default:
4574 CAM_ERR(CAM_ISP, "Invalid cmd");
4575 }
4576
4577 return rc;
4578}
4579
/*
 * Read the latest SOF timestamps (HW timestamp and boot timestamp)
 * from the context's first CSID resource.  Values are captured from
 * the LEFT split only — left is master in dual-VFE and the sole holder
 * for RDI-only contexts — though the query is issued on every valid
 * split, so @rc reflects the last process_cmd issued.
 *
 * @ife_ctx:         context whose CSID is queried
 * @time_stamp:      out - CSID HW SOF timestamp
 * @boot_time_stamp: out - SOF time on the boot clock
 *
 * Returns 0 on success, -EINVAL if no resource accepted the command.
 */
static int cam_ife_mgr_cmd_get_sof_timestamp(
	struct cam_ife_hw_mgr_ctx *ife_ctx,
	uint64_t *time_stamp,
	uint64_t *boot_time_stamp)
{
	int rc = -EINVAL;
	uint32_t i;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_hw_intf *hw_intf;
	struct cam_csid_get_time_stamp_args csid_get_time;

	/* NOTE(review): assumes res_list_ife_csid is non-empty — confirm
	 * callers only invoke this on a started context.
	 */
	hw_mgr_res = list_first_entry(&ife_ctx->res_list_ife_csid,
		struct cam_ife_hw_mgr_res, list);
	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
		if (!hw_mgr_res->hw_res[i])
			continue;

		/*
		 * Get the SOF time stamp from left resource only.
		 * Left resource is master for dual vfe case and
		 * Rdi only context case left resource only hold
		 * the RDI resource
		 */

		hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
		if (hw_intf->hw_ops.process_cmd) {
			/*
			 * Single VFE case, Get the time stamp from
			 * available one csid hw in the context
			 * Dual VFE case, get the time stamp from
			 * master(left) would be sufficient
			 */

			csid_get_time.node_res =
				hw_mgr_res->hw_res[i];
			rc = hw_intf->hw_ops.process_cmd(
				hw_intf->hw_priv,
				CAM_IFE_CSID_CMD_GET_TIME_STAMP,
				&csid_get_time,
				sizeof(
				struct cam_csid_get_time_stamp_args));
			if (!rc && (i == CAM_ISP_HW_SPLIT_LEFT)) {
				*time_stamp =
					csid_get_time.time_stamp_val;
				*boot_time_stamp =
					csid_get_time.boot_timestamp;
			}
		}
	}
	if (rc)
		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");

	return rc;
}
4634
4635static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
4636{
4637 int32_t rc = 0;
4638 struct cam_hw_event_recovery_data *recovery_data = data;
4639 struct cam_hw_start_args start_args;
4640 struct cam_hw_stop_args stop_args;
4641 struct cam_ife_hw_mgr *ife_hw_mgr = priv;
4642 struct cam_ife_hw_mgr_res *hw_mgr_res;
4643 uint32_t i = 0;
4644
4645 uint32_t error_type = recovery_data->error_type;
4646 struct cam_ife_hw_mgr_ctx *ctx = NULL;
4647
4648 /* Here recovery is performed */
4649 CAM_DBG(CAM_ISP, "ErrorType = %d", error_type);
4650
4651 switch (error_type) {
4652 case CAM_ISP_HW_ERROR_OVERFLOW:
4653 case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
4654 if (!recovery_data->affected_ctx[0]) {
4655 CAM_ERR(CAM_ISP,
4656 "No context is affected but recovery called");
4657 kfree(recovery_data);
4658 return 0;
4659 }
4660 /* stop resources here */
4661 CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
4662 recovery_data->no_of_context);
4663 for (i = 0; i < recovery_data->no_of_context; i++) {
4664 stop_args.ctxt_to_hw_map =
4665 recovery_data->affected_ctx[i];
4666 rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args);
4667 if (rc) {
4668 CAM_ERR(CAM_ISP, "CTX stop failed(%d)", rc);
4669 return rc;
4670 }
4671 }
4672
4673 CAM_DBG(CAM_ISP, "RESET: CSID PATH");
4674 for (i = 0; i < recovery_data->no_of_context; i++) {
4675 ctx = recovery_data->affected_ctx[i];
4676 list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
4677 list) {
4678 rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
4679 if (rc) {
4680 CAM_ERR(CAM_ISP, "Failed RESET (%d)",
4681 hw_mgr_res->res_id);
4682 return rc;
4683 }
4684 }
4685 }
4686
4687 CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
4688
4689 for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
4690 if (recovery_data->affected_core[i])
4691 cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
4692 }
4693
4694 CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
4695 recovery_data->no_of_context);
4696
4697 for (i = 0; i < recovery_data->no_of_context; i++) {
4698 ctx = recovery_data->affected_ctx[i];
4699 start_args.ctxt_to_hw_map = ctx;
4700
4701 atomic_set(&ctx->overflow_pending, 0);
4702
4703 rc = cam_ife_mgr_restart_hw(&start_args);
4704 if (rc) {
4705 CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
4706 return rc;
4707 }
4708 CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
4709 }
4710 CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
4711
4712 break;
4713
4714 case CAM_ISP_HW_ERROR_P2I_ERROR:
4715 break;
4716
4717 case CAM_ISP_HW_ERROR_VIOLATION:
4718 break;
4719
4720 default:
4721 CAM_ERR(CAM_ISP, "Invalid Error");
4722 }
4723 CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
4724
4725 kfree(recovery_data);
4726 return rc;
4727}
4728
/*
 * Queue overflow recovery work onto the hw-mgr workq.
 *
 * A private copy of @ife_mgr_recovery_data is allocated (GFP_ATOMIC —
 * this may run from interrupt bottom-half context, and callers pass
 * stack-resident data) and handed to the workq task; the copy is freed
 * by cam_ife_mgr_process_recovery_cb.
 *
 * Returns 0 on successful enqueue, -ENOMEM when the copy or a workq
 * task cannot be obtained, or the enqueue error code.
 */
static int cam_ife_hw_mgr_do_error_recovery(
	struct cam_hw_event_recovery_data *ife_mgr_recovery_data)
{
	int32_t rc = 0;
	struct crm_workq_task *task = NULL;
	struct cam_hw_event_recovery_data *recovery_data = NULL;

	recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
		GFP_ATOMIC);
	if (!recovery_data)
		return -ENOMEM;

	/* Snapshot caller data; caller's copy may go out of scope */
	memcpy(recovery_data, ife_mgr_recovery_data,
		sizeof(struct cam_hw_event_recovery_data));

	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type);

	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No empty task frame");
		kfree(recovery_data);
		return -ENOMEM;
	}

	task->process_cb = &cam_ife_mgr_process_recovery_cb;
	task->payload = recovery_data;
	/*
	 * NOTE(review): if enqueue fails, recovery_data is not freed here
	 * and the callback never runs — verify whether the workq reclaims
	 * the task payload on enqueue failure.
	 */
	rc = cam_req_mgr_workq_enqueue_task(task,
		recovery_data->affected_ctx[0]->hw_mgr,
		CRM_TASK_PRIORITY_0);

	return rc;
}
4761
4762/*
4763 * This function checks if any of the valid entry in affected_core[]
4764 * is associated with this context. if YES
4765 * a. It fills the other cores associated with this context.in
4766 * affected_core[]
4767 * b. Return 1 if ctx is affected, 0 otherwise
4768 */
4769static int cam_ife_hw_mgr_is_ctx_affected(
4770 struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
4771 uint32_t *affected_core, uint32_t size)
4772{
4773 int32_t rc = 0;
4774 uint32_t i = 0, j = 0;
4775 uint32_t max_idx = ife_hwr_mgr_ctx->num_base;
4776 uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
4777
4778 CAM_DBG(CAM_ISP, "max_idx = %d", max_idx);
4779
4780 if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
4781 (size > CAM_IFE_HW_NUM_MAX)) {
4782 CAM_ERR(CAM_ISP, "invalid parameter = %d", max_idx);
4783 return rc;
4784 }
4785
4786 for (i = 0; i < max_idx; i++) {
4787 if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
4788 rc = 1;
4789 else {
4790 ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
4791 CAM_DBG(CAM_ISP, "Add affected IFE %d for recovery",
4792 ctx_affected_core_idx[j]);
4793 j = j + 1;
4794 }
4795 }
4796
4797 if (rc == 1) {
4798 while (j) {
4799 if (affected_core[ctx_affected_core_idx[j-1]] != 1)
4800 affected_core[ctx_affected_core_idx[j-1]] = 1;
4801 j = j - 1;
4802 }
4803 }
4804
4805 return rc;
4806}
4807
/*
 * For any dual VFE context, if non-affected VFE is also serving
 * another context, then that context should also be notified with fatal error
 * So Loop through each context and -
 * a. match core_idx
 * b. Notify CTX with fatal error
 *
 * Side effects per affected context: overflow_pending is set, the
 * context's error callback is invoked (which reports the fatal error
 * to CRM), and the context is appended to @recovery_data (capped at
 * CAM_CTX_MAX).  Finally the accumulated affected-core set is copied
 * into @recovery_data for the recovery worker.
 *
 * Returns 0, or -EINVAL when @recovery_data is NULL.
 */
static int cam_ife_hw_mgr_find_affected_ctx(
	struct cam_ife_hw_mgr_ctx *curr_ife_hwr_mgr_ctx,
	struct cam_isp_hw_error_event_data *error_event_data,
	uint32_t curr_core_idx,
	struct cam_hw_event_recovery_data *recovery_data)
{
	uint32_t affected_core[CAM_IFE_HW_NUM_MAX] = {0};
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
	cam_hw_event_cb_func notify_err_cb;
	struct cam_ife_hw_mgr *ife_hwr_mgr = NULL;
	enum cam_isp_hw_event_type event_type = CAM_ISP_HW_EVENT_ERROR;
	uint32_t i = 0;

	if (!recovery_data) {
		CAM_ERR(CAM_ISP, "recovery_data parameter is NULL");
		return -EINVAL;
	}

	recovery_data->no_of_context = 0;
	/* Seed the set with the core that actually raised the error */
	affected_core[curr_core_idx] = 1;
	ife_hwr_mgr = curr_ife_hwr_mgr_ctx->hw_mgr;

	list_for_each_entry(ife_hwr_mgr_ctx,
		&ife_hwr_mgr->used_ctx_list, list) {
		/*
		 * Check if current core_idx matches the HW associated
		 * with this context.  Note: is_ctx_affected also grows
		 * affected_core with this context's partner cores.
		 */
		if (!cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx,
			affected_core, CAM_IFE_HW_NUM_MAX))
			continue;

		atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
		notify_err_cb = ife_hwr_mgr_ctx->common.event_cb[event_type];

		/* Add affected_context in list of recovery data */
		CAM_DBG(CAM_ISP, "Add affected ctx %d to list",
			ife_hwr_mgr_ctx->ctx_index);
		if (recovery_data->no_of_context < CAM_CTX_MAX)
			recovery_data->affected_ctx[
				recovery_data->no_of_context++] =
				ife_hwr_mgr_ctx;

		/*
		 * In the call back function corresponding ISP context
		 * will update CRM about fatal Error
		 */
		notify_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
			CAM_ISP_HW_EVENT_ERROR, error_event_data);
	}

	/* fill the affected_core in recovery data */
	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
		recovery_data->affected_core[i] = affected_core[i];
		CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
			i, recovery_data->affected_core[i]);
	}

	return 0;
}
4875
/*
 * Dispatch an error IRQ payload to the CAMIF resource(s) of this
 * context whose VFE core raised it, and return the error type decoded
 * by the resource's bottom-half handler (0 when no error matched).
 *
 * @handler_priv: struct cam_ife_hw_mgr_ctx pointer
 * @payload:      struct cam_vfe_top_irq_evt_payload pointer
 */
static int cam_ife_hw_mgr_get_err_type(
	void *handler_priv,
	void *payload)
{
	struct cam_isp_resource_node *hw_res_l = NULL;
	struct cam_isp_resource_node *hw_res_r = NULL;
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload *evt_payload;
	struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
	uint32_t status = 0;
	uint32_t core_idx;

	ife_hwr_mgr_ctx = handler_priv;
	evt_payload = payload;

	if (!evt_payload) {
		CAM_ERR(CAM_ISP, "No payload");
		/*
		 * NOTE(review): returns IRQ_HANDLED although callers treat
		 * the result as an error-type value — confirm intent.
		 */
		return IRQ_HANDLED;
	}

	core_idx = evt_payload->core_index;
	evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
	evt_payload->enable_reg_dump =
		g_ife_hw_mgr.debug_cfg.enable_reg_dump;

	/* Only CAMIF input resources are inspected for error decode */
	list_for_each_entry(isp_ife_camif_res,
		&ife_hwr_mgr_ctx->res_list_ife_src, list) {

		if ((isp_ife_camif_res->res_type ==
			CAM_IFE_HW_MGR_RES_UNINIT) ||
			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
			continue;

		hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
		hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];

		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d\n",
			isp_ife_camif_res->is_dual_vfe);

		/* ERROR check for Left VFE */
		if (!hw_res_l) {
			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
			break;
		}

		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
			hw_res_l->hw_intf->hw_idx);

		/* Only the core that raised the IRQ decodes the status */
		if (core_idx == hw_res_l->hw_intf->hw_idx) {
			status = hw_res_l->bottom_half_handler(
				hw_res_l, evt_payload);
		}

		if (status)
			break;

		/* ERROR check for Right VFE */
		if (!hw_res_r) {
			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
			continue;
		}
		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
			hw_res_r->hw_intf->hw_idx);

		if (core_idx == hw_res_r->hw_intf->hw_idx) {
			status = hw_res_r->bottom_half_handler(
				hw_res_r, evt_payload);
		}

		if (status)
			break;
	}
	CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
	return status;
}
4951
/*
 * Handle a CAMIF error IRQ: decode the error type, notify every
 * context sharing the affected VFE core(s), and — when recovery is
 * enabled via debugfs — queue overflow recovery work.
 *
 * If the context already has an overflow pending, notification and
 * recovery are skipped and the decoded status is returned as-is.
 *
 * @handler_priv: struct cam_ife_hw_mgr_ctx pointer
 * @payload:      struct cam_vfe_top_irq_evt_payload pointer
 *
 * Returns the decoded error status (0 when no error was found).
 */
static int cam_ife_hw_mgr_handle_camif_error(
	void *handler_priv,
	void *payload)
{
	int32_t error_status;
	uint32_t core_idx;
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload *evt_payload;
	struct cam_isp_hw_error_event_data error_event_data = {0};
	struct cam_hw_event_recovery_data recovery_data = {0};
	int rc = 0;

	ife_hwr_mgr_ctx = handler_priv;
	evt_payload = payload;
	core_idx = evt_payload->core_index;

	error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
		evt_payload);
	/* Recovery already in flight for this ctx — don't re-notify */
	if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending)) {
		rc = error_status;
		goto end;
	}

	switch (error_status) {
	case CAM_ISP_HW_ERROR_OVERFLOW:
	case CAM_ISP_HW_ERROR_P2I_ERROR:
	case CAM_ISP_HW_ERROR_VIOLATION:
		CAM_ERR(CAM_ISP, "Enter: error_type (%d)", error_status);
		rc = error_status;
		if (g_ife_hw_mgr.debug_cfg.enable_recovery)
			error_event_data.recovery_enabled = true;

		/* All three error classes are reported upward as overflow */
		error_event_data.error_type =
			CAM_ISP_HW_ERROR_OVERFLOW;

		error_event_data.enable_reg_dump =
			g_ife_hw_mgr.debug_cfg.enable_reg_dump;

		/* Notify every ctx on the affected core(s); fills
		 * recovery_data with the ctx/core sets to recover.
		 */
		cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
			&error_event_data,
			core_idx,
			&recovery_data);

		if (!g_ife_hw_mgr.debug_cfg.enable_recovery) {
			CAM_DBG(CAM_ISP, "recovery is not enabled");
			break;
		}

		CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
		/* Trigger for recovery */
		recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
		break;
	default:
		CAM_DBG(CAM_ISP, "No error (%d)", error_status);
		break;
	}

end:
	return rc;
}
5013
/*
 * DUAL VFE is valid for PIX processing path
 * This function assumes hw_res[0] is master in case
 * of dual VFE.
 * RDI path does not support DUAl VFE
 *
 * Handles a reg-update (RUP) IRQ: walks the context's IFE source
 * resources, lets the resource on the IRQ-raising core ACK the RUP via
 * its bottom-half handler, and on a successful ACK invokes the
 * context's REG_UPDATE event callback.  For RDI resources the callback
 * fires only in RDI-only or FE-enabled contexts (otherwise CAMIF owns
 * the notification).
 *
 * @handler_priv: struct cam_ife_hw_mgr_ctx pointer
 * @payload:      struct cam_vfe_top_irq_evt_payload pointer
 *
 * Returns 0, or -EPERM on NULL arguments.
 */
static int cam_ife_hw_mgr_handle_reg_update(
	void *handler_priv,
	void *payload)
{
	struct cam_isp_resource_node *hw_res;
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload *evt_payload;
	struct cam_ife_hw_mgr_res *ife_src_res = NULL;
	cam_hw_event_cb_func ife_hwr_irq_rup_cb;
	struct cam_isp_hw_reg_update_event_data rup_event_data;
	uint32_t core_idx;
	/* Non-zero until some resource ACKs the RUP for this core.
	 * NOTE(review): uint32_t holding -EINVAL — relies on the !rup_status
	 * check only; the value itself is never interpreted as an errno.
	 */
	uint32_t rup_status = -EINVAL;

	CAM_DBG(CAM_ISP, "Enter");

	ife_hwr_mgr_ctx = handler_priv;
	evt_payload = payload;

	if (!handler_priv || !payload) {
		CAM_ERR(CAM_ISP, "Invalid Parameter");
		return -EPERM;
	}

	core_idx = evt_payload->core_index;
	ife_hwr_irq_rup_cb =
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];

	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
	list_for_each_entry(ife_src_res,
		&ife_hwr_mgr_ctx->res_list_ife_src, list) {

		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			continue;

		CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
			ife_src_res->res_id, core_idx);
		switch (ife_src_res->res_id) {
		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
			/* CAMIF lite raises no RUP notification here */
			break;
		case CAM_ISP_HW_VFE_IN_CAMIF:
		case CAM_ISP_HW_VFE_IN_RD:
			if (ife_src_res->is_dual_vfe)
				/* It checks for slave core RUP ACK*/
				hw_res = ife_src_res->hw_res[1];
			else
				hw_res = ife_src_res->hw_res[0];

			if (!hw_res) {
				CAM_ERR(CAM_ISP, "CAMIF device is NULL");
				break;
			}
			CAM_DBG(CAM_ISP,
				"current_core_id = %d , core_idx res = %d",
				core_idx, hw_res->hw_intf->hw_idx);

			if (core_idx == hw_res->hw_intf->hw_idx) {
				rup_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);
			}

			/* Dual VFE: also ACK on the master (left) core;
			 * its status does not gate the notification.
			 */
			if (ife_src_res->is_dual_vfe) {
				hw_res = ife_src_res->hw_res[0];
				if (core_idx == hw_res->hw_intf->hw_idx) {
					hw_res->bottom_half_handler(
						hw_res, evt_payload);
				}
			}

			/* Suppress events while overflow recovery pends */
			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;

			if (!rup_status) {
				rup_event_data.irq_mono_boot_time =
					evt_payload->ts.time_usecs;
				ife_hwr_irq_rup_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_REG_UPDATE,
					&rup_event_data);
			}
			break;

		case CAM_ISP_HW_VFE_IN_RDI0:
		case CAM_ISP_HW_VFE_IN_RDI1:
		case CAM_ISP_HW_VFE_IN_RDI2:
		case CAM_ISP_HW_VFE_IN_RDI3:
			/* RDI never runs dual VFE: hw_res[0] only */
			hw_res = ife_src_res->hw_res[0];

			if (!hw_res) {
				CAM_ERR(CAM_ISP, "RDI Device is NULL");
				break;
			}

			if (core_idx == hw_res->hw_intf->hw_idx)
				rup_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);

			/* Notify only for RDI-only or FE-enabled contexts */
			if (ife_hwr_mgr_ctx->is_rdi_only_context == 0 &&
				ife_hwr_mgr_ctx->is_fe_enable == false)
				continue;

			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;
			if (!rup_status) {
				rup_event_data.irq_mono_boot_time =
					evt_payload->ts.time_usecs;
				/* Send the Reg update hw event */
				ife_hwr_irq_rup_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_REG_UPDATE,
					&rup_event_data);
			}
			break;
		default:
			CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
				ife_src_res->res_id);
		}

	}

	if (!rup_status)
		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);

	return 0;
}
5144
/*
 * cam_ife_hw_mgr_check_irq_for_dual_vfe()
 *
 * For a dual-VFE context, decide whether a hw event (SOF/EPOCH/EOF) has
 * been received from BOTH cores and may therefore be reported upstream.
 *
 * @ife_hw_mgr_ctx: context holding the per-core event counters
 * @core_idx0:      hw index of the first (left/master) VFE core
 * @core_idx1:      hw index of the second (right/slave) VFE core
 * @hw_event_type:  CAM_ISP_HW_EVENT_SOF/EPOCH/EOF, selects the counter set
 *
 * Returns 0 when both cores have delivered the event (counters are then
 * consumed/reset) or when a recoverable mismatch was patched up; returns
 * 0 for unknown event types; returns -1 while the event is still pending
 * on one core or when too many mismatches were already seen.
 */
static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
	struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx,
	uint32_t core_idx0,
	uint32_t core_idx1,
	uint32_t hw_event_type)
{
	int32_t rc = -1;
	uint32_t *event_cnt = NULL;

	/* Pick the per-core counter array that matches the event type */
	switch (hw_event_type) {
	case CAM_ISP_HW_EVENT_SOF:
		event_cnt = ife_hw_mgr_ctx->sof_cnt;
		break;
	case CAM_ISP_HW_EVENT_EPOCH:
		event_cnt = ife_hw_mgr_ctx->epoch_cnt;
		break;
	case CAM_ISP_HW_EVENT_EOF:
		event_cnt = ife_hw_mgr_ctx->eof_cnt;
		break;
	default:
		return 0;
	}

	/* Both cores have reported the event: consume the counters and
	 * allow the event to be notified upstream.
	 */
	if (event_cnt[core_idx0] ==
		event_cnt[core_idx1]) {

		event_cnt[core_idx0] = 0;
		event_cnt[core_idx1] = 0;

		rc = 0;
		return rc;
	}

	/*
	 * Counters diverged by more than one: one VFE failed to generate
	 * the irq.  NOTE(review): the subtractions are unsigned, so when
	 * the *other* core is ahead the difference wraps and also passes
	 * the "> 1" test; the symmetric clause makes this mostly benign,
	 * but worth confirming against the intended semantics.
	 */
	if ((event_cnt[core_idx0] &&
		(event_cnt[core_idx0] - event_cnt[core_idx1] > 1)) ||
		(event_cnt[core_idx1] &&
		(event_cnt[core_idx1] - event_cnt[core_idx0] > 1))) {

		/* Too many mismatches already patched; give up (-1) */
		if (ife_hw_mgr_ctx->dual_ife_irq_mismatch_cnt > 10) {
			rc = -1;
			return rc;
		}

		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"One of the VFE could not generate hw event %d",
			hw_event_type);
		/* Drop one pending event from whichever core ran ahead */
		if (event_cnt[core_idx0] >= 2) {
			event_cnt[core_idx0]--;
			ife_hw_mgr_ctx->dual_ife_irq_mismatch_cnt++;
		}
		if (event_cnt[core_idx1] >= 2) {
			event_cnt[core_idx1]--;
			ife_hw_mgr_ctx->dual_ife_irq_mismatch_cnt++;
		}

		/* Dump irq state once, on the first detected mismatch */
		if (ife_hw_mgr_ctx->dual_ife_irq_mismatch_cnt == 1)
			cam_ife_mgr_ctx_irq_dump(ife_hw_mgr_ctx);
		rc = 0;
	}

	CAM_DBG(CAM_ISP, "Only one core_index has given hw event %d",
		hw_event_type);

	return rc;
}
5210
/*
 * cam_ife_hw_mgr_handle_epoch_for_camif_hw_res()
 *
 * Tasklet-level EPOCH irq handler.  Walks the context's IFE input (src)
 * resources, runs the bottom-half of each resource owned by the
 * interrupting core and, once the EPOCH is confirmed (on both cores for
 * dual VFE), notifies the context via the registered EPOCH callback.
 *
 * @handler_priv: struct cam_ife_hw_mgr_ctx * of the owning context
 * @payload:      struct cam_vfe_top_irq_evt_payload * from the irq top half
 *
 * Always returns 0.
 *
 * NOTE(review): epoch_done_event_data is passed to the callback without
 * any field being initialized here — presumably the consumer only keys
 * off the event type; confirm against the callback implementation.
 */
static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
	void *handler_priv,
	void *payload)
{
	int32_t rc = -EINVAL;
	struct cam_isp_resource_node *hw_res_l;
	struct cam_isp_resource_node *hw_res_r;
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload *evt_payload;
	struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
	cam_hw_event_cb_func ife_hwr_irq_epoch_cb;
	struct cam_isp_hw_epoch_event_data epoch_done_event_data;
	uint32_t core_idx;
	uint32_t epoch_status = -EINVAL;	/* negative value in a uint32_t; only tested with ! */
	uint32_t core_index0;
	uint32_t core_index1;

	CAM_DBG(CAM_ISP, "Enter");

	ife_hwr_mgr_ctx = handler_priv;
	evt_payload = payload;
	ife_hwr_irq_epoch_cb =
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
	core_idx = evt_payload->core_index;

	evt_payload->evt_id = CAM_ISP_HW_EVENT_EPOCH;

	/* Only CAMIF/RD class inputs (res_id <= CAM_ISP_HW_VFE_IN_RD)
	 * participate in EPOCH handling.
	 */
	list_for_each_entry(isp_ife_camif_res,
		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
		if ((isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			|| (isp_ife_camif_res->res_id >
			CAM_ISP_HW_VFE_IN_RD)) {
			continue;
		}

		hw_res_l = isp_ife_camif_res->hw_res[0];
		hw_res_r = isp_ife_camif_res->hw_res[1];

		switch (isp_ife_camif_res->is_dual_vfe) {
		/* Handling Single VFE Scenario */
		case 0:
			/* EPOCH check for Left side VFE */
			if (!hw_res_l) {
				CAM_ERR(CAM_ISP, "Left Device is NULL");
				break;
			}

			if (core_idx == hw_res_l->hw_intf->hw_idx) {
				epoch_status = hw_res_l->bottom_half_handler(
					hw_res_l, evt_payload);
				/* Suppress notification while an overflow
				 * recovery is pending on this context.
				 */
				if (atomic_read(
					&ife_hwr_mgr_ctx->overflow_pending))
					break;
				if (!epoch_status)
					ife_hwr_irq_epoch_cb(
						ife_hwr_mgr_ctx->common.cb_priv,
						CAM_ISP_HW_EVENT_EPOCH,
						&epoch_done_event_data);
			}

			break;

		/* Handling Dual VFE Scenario */
		case 1:
			/* EPOCH check for Left side VFE (Master) */

			if ((!hw_res_l) || (!hw_res_r)) {
				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
				break;
			}
			if (core_idx == hw_res_l->hw_intf->hw_idx) {
				epoch_status = hw_res_l->bottom_half_handler(
					hw_res_l, evt_payload);

				if (!epoch_status)
					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
				else
					break;
			}

			/* EPOCH check for Right side VFE */
			if (core_idx == hw_res_r->hw_intf->hw_idx) {
				epoch_status = hw_res_r->bottom_half_handler(
					hw_res_r, evt_payload);

				if (!epoch_status)
					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
				else
					break;
			}

			core_index0 = hw_res_l->hw_intf->hw_idx;
			core_index1 = hw_res_r->hw_intf->hw_idx;

			/* Report only when both cores delivered the EPOCH */
			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
				ife_hwr_mgr_ctx,
				core_index0,
				core_index1,
				evt_payload->evt_id);

			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;
			if (!rc)
				ife_hwr_irq_epoch_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_EPOCH,
					&epoch_done_event_data);

			break;

		/* Error */
		default:
			CAM_ERR(CAM_ISP, "error with hw_res");

		}
	}

	if (!epoch_status)
		CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);

	return 0;
}
5333
/*
 * cam_ife_hw_mgr_process_camif_sof()
 *
 * Process a SOF irq for one CAMIF/RD input resource.  Runs the bottom
 * half on the hw resource matching the interrupting core; for dual VFE,
 * counts the SOF per core and only returns success once both cores have
 * delivered it (via cam_ife_hw_mgr_check_irq_for_dual_vfe).
 *
 * @isp_ife_camif_res: the IFE src resource being serviced
 * @ife_hwr_mgr_ctx:   owning context (per-core sof counters live here)
 * @evt_payload:       irq payload carrying the interrupting core index
 *
 * Returns 0 when the SOF should be reported upstream, -EINVAL otherwise.
 */
static int cam_ife_hw_mgr_process_camif_sof(
	struct cam_ife_hw_mgr_res *isp_ife_camif_res,
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
	struct cam_vfe_top_irq_evt_payload *evt_payload)
{
	struct cam_isp_resource_node *hw_res_l = NULL;
	struct cam_isp_resource_node *hw_res_r = NULL;
	int32_t rc = -EINVAL;
	uint32_t core_idx;
	uint32_t sof_status = 0;
	uint32_t core_index0;
	uint32_t core_index1;

	CAM_DBG(CAM_ISP, "Enter");
	core_idx = evt_payload->core_index;
	hw_res_l = isp_ife_camif_res->hw_res[0];
	hw_res_r = isp_ife_camif_res->hw_res[1];
	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
		isp_ife_camif_res->is_dual_vfe);

	switch (isp_ife_camif_res->is_dual_vfe) {
	/* Handling Single VFE Scenario */
	case 0:
		/* SOF check for Left side VFE */
		if (!hw_res_l) {
			CAM_ERR(CAM_ISP, "VFE Device is NULL");
			break;
		}
		CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
			core_idx, hw_res_l->hw_intf->hw_idx);

		if (core_idx == hw_res_l->hw_intf->hw_idx) {
			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
				evt_payload);
			/* Drop the event while overflow recovery pending */
			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;
			if (!sof_status)
				rc = 0;
		}

		break;

	/* Handling Dual VFE Scenario */
	case 1:
		/* SOF check for Left side VFE */

		if (!hw_res_l) {
			CAM_ERR(CAM_ISP, "VFE Device is NULL");
			break;
		}
		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
			core_idx,
			hw_res_l->hw_intf->hw_idx);

		if (core_idx == hw_res_l->hw_intf->hw_idx) {
			sof_status = hw_res_l->bottom_half_handler(
				hw_res_l, evt_payload);
			if (!sof_status)
				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
			else
				break;
		}

		/* SOF check for Right side VFE */
		if (!hw_res_r) {
			CAM_ERR(CAM_ISP, "VFE Device is NULL");
			break;
		}
		CAM_DBG(CAM_ISP, "curr_core_idx = %d, ews hw idx= %d",
			core_idx,
			hw_res_r->hw_intf->hw_idx);
		if (core_idx == hw_res_r->hw_intf->hw_idx) {
			sof_status = hw_res_r->bottom_half_handler(hw_res_r,
				evt_payload);
			if (!sof_status)
				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
			else
				break;
		}

		core_index0 = hw_res_l->hw_intf->hw_idx;
		core_index1 = hw_res_r->hw_intf->hw_idx;

		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
			break;

		/* rc == 0 only once both cores delivered this SOF */
		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
			core_index0, core_index1, evt_payload->evt_id);

		break;

	default:
		CAM_ERR(CAM_ISP, "error with hw_res");
		break;
	}

	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);

	return rc;
}
5434
/*
 * cam_ife_hw_mgr_handle_sof()
 *
 * Tasklet-level SOF irq handler.  Iterates the context's IFE input
 * resources; RDI paths report SOF directly (only for RDI-only or FE
 * contexts), CAMIF/RD paths go through cam_ife_hw_mgr_process_camif_sof
 * (which handles dual VFE).  At most one SOF event is sent upstream per
 * invocation (guarded by sof_sent).
 *
 * @handler_priv: struct cam_ife_hw_mgr_ctx * of the owning context
 * @payload:      struct cam_vfe_top_irq_evt_payload * from the top half
 *
 * Returns 0; returns IRQ_HANDLED on a NULL payload.
 * NOTE(review): the IRQ_HANDLED return (value 1) differs from the 0 used
 * everywhere else in this handler family — confirm callers ignore it.
 */
static int cam_ife_hw_mgr_handle_sof(
	void *handler_priv,
	void *payload)
{
	struct cam_isp_resource_node *hw_res = NULL;
	struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload *evt_payload;
	struct cam_ife_hw_mgr_res *ife_src_res = NULL;
	cam_hw_event_cb_func ife_hw_irq_sof_cb;
	struct cam_isp_hw_sof_event_data sof_done_event_data;
	uint32_t sof_status = 0;
	bool sof_sent = false;

	CAM_DBG(CAM_ISP, "Enter");

	ife_hw_mgr_ctx = handler_priv;
	evt_payload = payload;
	if (!evt_payload) {
		CAM_ERR(CAM_ISP, "no payload");
		return IRQ_HANDLED;
	}
	ife_hw_irq_sof_cb =
		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];

	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;

	list_for_each_entry(ife_src_res,
		&ife_hw_mgr_ctx->res_list_ife_src, list) {

		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			continue;

		switch (ife_src_res->res_id) {
		case CAM_ISP_HW_VFE_IN_RDI0:
		case CAM_ISP_HW_VFE_IN_RDI1:
		case CAM_ISP_HW_VFE_IN_RDI2:
		case CAM_ISP_HW_VFE_IN_RDI3:
			hw_res = ife_src_res->hw_res[0];
			sof_status = hw_res->bottom_half_handler(
				hw_res, evt_payload);

			/* check if it is rdi only context */
			if (ife_hw_mgr_ctx->is_fe_enable ||
				ife_hw_mgr_ctx->is_rdi_only_context) {
				if (!sof_status && !sof_sent) {
					cam_ife_mgr_cmd_get_sof_timestamp(
						ife_hw_mgr_ctx,
						&sof_done_event_data.timestamp,
						&sof_done_event_data.boot_time);
					sof_done_event_data.irq_mono_boot_time =
						evt_payload->ts.time_usecs;

					ife_hw_irq_sof_cb(
						ife_hw_mgr_ctx->common.cb_priv,
						CAM_ISP_HW_EVENT_SOF,
						&sof_done_event_data);
					CAM_DBG(CAM_ISP, "RDI sof_status = %d",
						sof_status);

					/* Only one SOF per invocation */
					sof_sent = true;
				}

			}
			break;

		case CAM_ISP_HW_VFE_IN_CAMIF:
		case CAM_ISP_HW_VFE_IN_RD:
			/* Dual-VFE aware path; 0 means SOF is confirmed */
			sof_status = cam_ife_hw_mgr_process_camif_sof(
				ife_src_res, ife_hw_mgr_ctx, evt_payload);
			if (!sof_status && !sof_sent) {
				cam_ife_mgr_cmd_get_sof_timestamp(
					ife_hw_mgr_ctx,
					&sof_done_event_data.timestamp,
					&sof_done_event_data.boot_time);
				sof_done_event_data.irq_mono_boot_time =
					evt_payload->ts.time_usecs;

				ife_hw_irq_sof_cb(
					ife_hw_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_SOF,
					&sof_done_event_data);
				CAM_DBG(CAM_ISP, "sof_status = %d",
					sof_status);

				sof_sent = true;
			}
			break;
		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
			break;
		default:
			CAM_ERR(CAM_ISP, "Invalid resource id :%d",
				ife_src_res->res_id);
			break;
		}
	}

	return 0;
}
5533
5534static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
5535 void *handler_priv,
5536 void *payload)
5537{
5538 int32_t rc = -EINVAL;
5539 struct cam_isp_resource_node *hw_res_l = NULL;
5540 struct cam_isp_resource_node *hw_res_r = NULL;
5541 struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
5542 struct cam_vfe_top_irq_evt_payload *evt_payload;
5543 struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
5544 cam_hw_event_cb_func ife_hwr_irq_eof_cb;
5545 struct cam_isp_hw_eof_event_data eof_done_event_data;
5546 uint32_t core_idx;
5547 uint32_t eof_status = 0;
5548 uint32_t core_index0;
5549 uint32_t core_index1;
5550
5551 CAM_DBG(CAM_ISP, "Enter");
5552
5553 ife_hwr_mgr_ctx = handler_priv;
5554 evt_payload = payload;
5555 if (!evt_payload) {
5556 pr_err("%s: no payload\n", __func__);
5557 return IRQ_HANDLED;
5558 }
5559 core_idx = evt_payload->core_index;
5560 ife_hwr_irq_eof_cb =
5561 ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
5562
5563 evt_payload->evt_id = CAM_ISP_HW_EVENT_EOF;
5564
5565 list_for_each_entry(isp_ife_camif_res,
5566 &ife_hwr_mgr_ctx->res_list_ife_src, list) {
5567
5568 if ((isp_ife_camif_res->res_type ==
5569 CAM_IFE_HW_MGR_RES_UNINIT) ||
5570 (isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
5571 continue;
5572
5573 hw_res_l = isp_ife_camif_res->hw_res[0];
5574 hw_res_r = isp_ife_camif_res->hw_res[1];
5575
5576 CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
5577 isp_ife_camif_res->is_dual_vfe);
5578 switch (isp_ife_camif_res->is_dual_vfe) {
5579 /* Handling Single VFE Scenario */
5580 case 0:
5581 /* EOF check for Left side VFE */
5582 if (!hw_res_l) {
5583 pr_err("%s: VFE Device is NULL\n",
5584 __func__);
5585 break;
5586 }
5587 CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
5588 core_idx, hw_res_l->hw_intf->hw_idx);
5589
5590 if (core_idx == hw_res_l->hw_intf->hw_idx) {
5591 eof_status = hw_res_l->bottom_half_handler(
5592 hw_res_l, evt_payload);
5593 if (atomic_read(
5594 &ife_hwr_mgr_ctx->overflow_pending))
5595 break;
Raja Mallike3ed1a32019-08-22 17:12:32 +05305596 if (!eof_status) {
5597 eof_done_event_data.irq_mono_boot_time =
5598 evt_payload->ts.time_usecs;
Raja Mallikc7e256f2018-12-06 17:36:28 +05305599 ife_hwr_irq_eof_cb(
5600 ife_hwr_mgr_ctx->common.cb_priv,
5601 CAM_ISP_HW_EVENT_EOF,
5602 &eof_done_event_data);
Raja Mallike3ed1a32019-08-22 17:12:32 +05305603 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05305604 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05305605 break;
5606 /* Handling dual VFE Scenario */
5607 case 1:
5608 if ((!hw_res_l) || (!hw_res_r)) {
5609 CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
5610 break;
5611 }
5612 if (core_idx == hw_res_l->hw_intf->hw_idx) {
5613 eof_status = hw_res_l->bottom_half_handler(
5614 hw_res_l, evt_payload);
5615
5616 if (!eof_status)
5617 ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
5618 else
5619 break;
5620 }
5621
5622 /* EOF check for Right side VFE */
5623 if (core_idx == hw_res_r->hw_intf->hw_idx) {
5624 eof_status = hw_res_r->bottom_half_handler(
5625 hw_res_r, evt_payload);
5626
5627 if (!eof_status)
5628 ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
5629 else
5630 break;
5631 }
5632
5633 core_index0 = hw_res_l->hw_intf->hw_idx;
5634 core_index1 = hw_res_r->hw_intf->hw_idx;
5635
5636 rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
5637 ife_hwr_mgr_ctx,
5638 core_index0,
5639 core_index1,
5640 evt_payload->evt_id);
5641
5642 if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
5643 break;
5644
Raja Mallike3ed1a32019-08-22 17:12:32 +05305645 if (!rc) {
5646 eof_done_event_data.irq_mono_boot_time =
5647 evt_payload->ts.time_usecs;
Raja Mallikc7e256f2018-12-06 17:36:28 +05305648 ife_hwr_irq_eof_cb(
5649 ife_hwr_mgr_ctx->common.cb_priv,
5650 CAM_ISP_HW_EVENT_EOF,
5651 &eof_done_event_data);
Raja Mallike3ed1a32019-08-22 17:12:32 +05305652 }
Raja Mallikc7e256f2018-12-06 17:36:28 +05305653
5654 break;
5655
5656 default:
5657 CAM_ERR(CAM_ISP, "error with hw_res");
5658 }
5659 }
5660
5661 CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
5662
5663 return 0;
5664}
5665
5666
/*
 * cam_ife_hw_mgr_handle_buf_done_for_hw_res()
 *
 * Tasklet-level buf-done (write master) irq handler.  Scans every IFE
 * output resource of the context, runs the bottom half of the one whose
 * master (index 0) hw matches the interrupting core, and either reports
 * a successful buf done upstream or, on composite/overwrite errors,
 * escalates to overflow error handling for all affected contexts.
 *
 * @handler_priv: unused; the context is taken from the payload itself
 * @payload:      struct cam_vfe_bus_irq_evt_payload * (carries ->ctx)
 *
 * Returns 0 on the normal path, or the result of
 * cam_ife_hw_mgr_find_affected_ctx() on the error path.
 */
static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
	void *handler_priv,
	void *payload)

{
	int32_t buf_done_status = 0;
	int32_t i;
	int32_t rc = 0;
	cam_hw_event_cb_func ife_hwr_irq_wm_done_cb;
	struct cam_isp_resource_node *hw_res_l = NULL;
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
	struct cam_vfe_bus_irq_evt_payload *evt_payload = payload;
	struct cam_ife_hw_mgr_res *isp_ife_out_res = NULL;
	struct cam_hw_event_recovery_data recovery_data;
	struct cam_isp_hw_done_event_data buf_done_event_data = {0};
	struct cam_isp_hw_error_event_data error_event_data = {0};
	uint32_t error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
	uint32_t num_of_error_handles = 0;

	CAM_DBG(CAM_ISP, "Enter");

	ife_hwr_mgr_ctx = evt_payload->ctx;
	ife_hwr_irq_wm_done_cb =
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];

	evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;

	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];

		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			continue;

		hw_res_l = isp_ife_out_res->hw_res[0];

		/*
		 * DUAL VFE: Index 0 is always a master. In case of composite
		 * Error, if the error is not in master, it needs to be checked
		 * in slave (for debuging purpose only) For other cases:
		 * Index zero is valid
		 */

		if (hw_res_l && (evt_payload->core_index ==
			hw_res_l->hw_intf->hw_idx))
			buf_done_status = hw_res_l->bottom_half_handler(
				hw_res_l, evt_payload);
		else
			continue;

		switch (buf_done_status) {
		case CAM_VFE_IRQ_STATUS_ERR_COMP:
			/*
			 * Write interface can pipeline upto 2 buffer done
			 * strobes from each write client. If any of the client
			 * triggers a third buffer done strobe before a
			 * composite interrupt based on the first buffer doneis
			 * triggered an error irq is set. This scenario can
			 * only happen if a client is 3 frames ahead of the
			 * other clients enabled in the same composite mask.
			 */
		case CAM_VFE_IRQ_STATUS_COMP_OWRT:
			/*
			 * It is an indication that bandwidth is not sufficient
			 * to generate composite done irq within the VBI time.
			 */

			error_resc_handle[num_of_error_handles++] =
				isp_ife_out_res->res_id;

			/* num_of_error_handles was just incremented, so
			 * this branch is always taken here.
			 */
			if (num_of_error_handles > 0) {
				error_event_data.error_type =
					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
				goto err;
			}

			break;
		case CAM_VFE_IRQ_STATUS_ERR:
			break;
		case CAM_VFE_IRQ_STATUS_SUCCESS:
			buf_done_event_data.num_handles = 1;
			buf_done_event_data.resource_handle[0] =
				isp_ife_out_res->res_id;

			/* Suppress reporting during overflow recovery */
			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;
			buf_done_event_data.irq_mono_boot_time =
				evt_payload->ts.time_usecs;
			/* Report for Successful buf_done event if any */
			if (buf_done_event_data.num_handles > 0 &&
				ife_hwr_irq_wm_done_cb) {
				CAM_DBG(CAM_ISP, "notify isp context");
				ife_hwr_irq_wm_done_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_DONE,
					&buf_done_event_data);
			}

			break;
		default:
			/* Do NOTHING */
			error_resc_handle[num_of_error_handles++] =
				isp_ife_out_res->res_id;
			if (num_of_error_handles > 0) {
				error_event_data.error_type =
					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
				goto err;
			}
			break;
		}
		if (!buf_done_status)
			CAM_DBG(CAM_ISP,
				"buf_done status:(%d),out_res->res_id: 0x%x",
				buf_done_status, isp_ife_out_res->res_id);
	}

	return rc;

err:
	/*
	 * Report for error if any.
	 * For the first phase, Error is reported as overflow, for all
	 * the affected context and any successful buf_done event is not
	 * reported.
	 */
	rc = cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
		&error_event_data, evt_payload->core_index,
		&recovery_data);

	/*
	 * We can temporarily return from here as
	 * for the first phase, we are going to reset entire HW.
	 */

	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
		buf_done_status);
	return rc;
}
5804
5805int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
5806 void *evt_payload_priv)
5807{
5808 struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = handler_priv;
5809 struct cam_vfe_bus_irq_evt_payload *evt_payload;
5810 int rc = -EINVAL;
5811
5812 if (!handler_priv)
5813 return rc;
5814
5815 evt_payload = evt_payload_priv;
5816 ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
5817
5818 CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
5819 evt_payload, evt_payload->core_index);
5820 CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
5821 CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
5822 CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
5823 CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
5824 CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
5825 evt_payload->irq_reg_val[4]);
5826 CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
5827 evt_payload->irq_reg_val[5]);
5828 CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
5829 evt_payload->irq_reg_val[6]);
5830 /* WM Done */
5831 return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
5832 evt_payload_priv);
5833}
5834
/*
 * cam_ife_mgr_do_tasklet()
 *
 * Tasklet entry point for VFE top-half irq payloads.  Dispatches the
 * payload, in order, to the error, EOF, SOF, reg-update and EPOCH
 * handlers, and finally releases the payload back to its pool.  If the
 * error handler reports a problem, all remaining events are skipped.
 *
 * @handler_priv:     struct cam_ife_hw_mgr_ctx * of the owning context
 * @evt_payload_priv: struct cam_vfe_top_irq_evt_payload * from the top half
 *
 * Returns IRQ_HANDLED after dispatch, or -EINVAL on NULL inputs.
 */
int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
{
	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = handler_priv;
	struct cam_vfe_top_irq_evt_payload *evt_payload;
	int rc = -EINVAL;

	if (!evt_payload_priv)
		return rc;

	evt_payload = evt_payload_priv;
	if (!handler_priv)
		return rc;

	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;

	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
		(void *)evt_payload,
		evt_payload->core_index);
	CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
	CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
	CAM_DBG(CAM_ISP, "Violation register: = %x",
		evt_payload->irq_reg_val[2]);

	/*
	 * If overflow/overwrite/error/violation are pending
	 * for this context it needs to be handled remaining
	 * interrupts are ignored.
	 */
	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
		evt_payload_priv);

	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"Encountered Error (%d), ignoring other irqs",
			rc);
		goto put_payload;
	}

	CAM_DBG(CAM_ISP, "Calling EOF");
	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
		evt_payload_priv);

	CAM_DBG(CAM_ISP, "Calling SOF");
	/* SOF IRQ */
	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
		evt_payload_priv);

	CAM_DBG(CAM_ISP, "Calling RUP");
	/* REG UPDATE */
	cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
		evt_payload_priv);

	CAM_DBG(CAM_ISP, "Calling EPOCH");
	/* EPOCH IRQ */
	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
		evt_payload_priv);

put_payload:
	/* Return the payload to the VFE core's payload pool */
	cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
	return IRQ_HANDLED;
}
5896
5897static int cam_ife_hw_mgr_sort_dev_with_caps(
5898 struct cam_ife_hw_mgr *ife_hw_mgr)
5899{
5900 int i;
5901
5902 /* get caps for csid devices */
5903 for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
5904 if (!ife_hw_mgr->csid_devices[i])
5905 continue;
5906 if (ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps) {
5907 ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps(
5908 ife_hw_mgr->csid_devices[i]->hw_priv,
5909 &ife_hw_mgr->ife_csid_dev_caps[i],
5910 sizeof(ife_hw_mgr->ife_csid_dev_caps[i]));
5911 }
5912 }
5913
5914 /* get caps for ife devices */
5915 for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
5916 if (!ife_hw_mgr->ife_devices[i])
5917 continue;
5918 if (ife_hw_mgr->ife_devices[i]->hw_ops.get_hw_caps) {
5919 ife_hw_mgr->ife_devices[i]->hw_ops.get_hw_caps(
5920 ife_hw_mgr->ife_devices[i]->hw_priv,
5921 &ife_hw_mgr->ife_dev_caps[i],
5922 sizeof(ife_hw_mgr->ife_dev_caps[i]));
5923 }
5924 }
5925
5926 return 0;
5927}
5928
/* debugfs setter: store the user-supplied CSID debug mask */
static int cam_ife_set_csid_debug(void *data, u64 val)
{
	g_ife_hw_mgr.debug_cfg.csid_debug = val;
	CAM_DBG(CAM_ISP, "Set CSID Debug value :%lld", val);
	return 0;
}
5935
/* debugfs getter: report the current CSID debug mask */
static int cam_ife_get_csid_debug(void *data, u64 *val)
{
	*val = g_ife_hw_mgr.debug_cfg.csid_debug;
	CAM_DBG(CAM_ISP, "Get CSID Debug value :%lld",
		g_ife_hw_mgr.debug_cfg.csid_debug);

	return 0;
}
5944
/* debugfs file ops binding the CSID debug get/set handlers ("%16llu" format) */
DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug,
	cam_ife_get_csid_debug,
	cam_ife_set_csid_debug, "%16llu");
5948
/* debugfs setter: store the CAMIF diag-sensor-status enable value */
static int cam_ife_set_camif_debug(void *data, u64 val)
{
	g_ife_hw_mgr.debug_cfg.camif_debug = val;
	CAM_DBG(CAM_ISP,
		"Set camif enable_diag_sensor_status value :%lld", val);
	return 0;
}
5956
5957static int cam_ife_get_camif_debug(void *data, u64 *val)
5958{
5959 *val = g_ife_hw_mgr.debug_cfg.camif_debug;
5960 CAM_DBG(CAM_ISP,
5961 "Set camif enable_diag_sensor_status value :%lld",
5962 g_ife_hw_mgr.debug_cfg.csid_debug);
5963
5964 return 0;
5965}
5966
/* debugfs file ops binding the CAMIF debug get/set handlers ("%16llu" format) */
DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug,
	cam_ife_get_camif_debug,
	cam_ife_set_camif_debug, "%16llu");
5970
5971static int cam_ife_hw_mgr_debug_register(void)
5972{
5973 g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
5974 NULL);
5975
5976 if (!g_ife_hw_mgr.debug_cfg.dentry) {
5977 CAM_ERR(CAM_ISP, "failed to create dentry");
5978 return -ENOMEM;
5979 }
5980
Raja Mallikd268c822019-02-18 13:50:39 +05305981 if (!debugfs_create_u32("enable_reg_dump",
5982 0644,
5983 g_ife_hw_mgr.debug_cfg.dentry,
5984 &g_ife_hw_mgr.debug_cfg.enable_reg_dump)) {
5985 CAM_ERR(CAM_ISP, "failed to create enable_reg_dump");
5986 goto err;
5987 }
5988
Raja Mallikc7e256f2018-12-06 17:36:28 +05305989 if (!debugfs_create_file("ife_csid_debug",
5990 0644,
5991 g_ife_hw_mgr.debug_cfg.dentry, NULL,
5992 &cam_ife_csid_debug)) {
5993 CAM_ERR(CAM_ISP, "failed to create cam_ife_csid_debug");
5994 goto err;
5995 }
5996
5997 if (!debugfs_create_u32("enable_recovery",
5998 0644,
5999 g_ife_hw_mgr.debug_cfg.dentry,
6000 &g_ife_hw_mgr.debug_cfg.enable_recovery)) {
6001 CAM_ERR(CAM_ISP, "failed to create enable_recovery");
6002 goto err;
6003 }
6004
6005 if (!debugfs_create_file("ife_camif_debug",
6006 0644,
6007 g_ife_hw_mgr.debug_cfg.dentry, NULL,
6008 &cam_ife_camif_debug)) {
6009 CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug");
6010 goto err;
6011 }
6012 g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
6013
6014 return 0;
6015
6016err:
6017 debugfs_remove_recursive(g_ife_hw_mgr.debug_cfg.dentry);
6018 return -ENOMEM;
6019}
6020
6021int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
6022{
6023 int rc = -EFAULT;
6024 int i, j;
6025 struct cam_iommu_handle cdm_handles;
6026 struct cam_ife_hw_mgr_ctx *ctx_pool;
6027 struct cam_ife_hw_mgr_res *res_list_ife_out;
6028
6029 CAM_DBG(CAM_ISP, "Enter");
6030
6031 memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
6032
6033 mutex_init(&g_ife_hw_mgr.ctx_mutex);
6034
6035 if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
6036 CAM_ERR(CAM_ISP, "CSID num is different then IFE num");
6037 return -EINVAL;
6038 }
6039
6040 /* fill ife hw intf information */
6041 for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
6042 rc = cam_vfe_hw_init(&g_ife_hw_mgr.ife_devices[i], i);
6043 if (!rc) {
6044 struct cam_hw_info *vfe_hw =
6045 (struct cam_hw_info *)
6046 g_ife_hw_mgr.ife_devices[i]->hw_priv;
6047 struct cam_hw_soc_info *soc_info = &vfe_hw->soc_info;
6048
6049 j++;
6050
6051 g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
6052 CAM_DBG(CAM_ISP,
6053 "reg_map: mem base = %pK cam_base = 0x%llx",
6054 (void __iomem *)soc_info->reg_map[0].mem_base,
6055 (uint64_t) soc_info->reg_map[0].mem_cam_base);
6056 } else {
6057 g_ife_hw_mgr.cdm_reg_map[i] = NULL;
6058 }
6059 }
6060 if (j == 0) {
6061 CAM_ERR(CAM_ISP, "no valid IFE HW");
6062 return -EINVAL;
6063 }
6064
6065 /* fill csid hw intf information */
6066 for (i = 0, j = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
6067 rc = cam_ife_csid_hw_init(&g_ife_hw_mgr.csid_devices[i], i);
6068 if (!rc)
6069 j++;
6070 }
6071 if (!j) {
6072 CAM_ERR(CAM_ISP, "no valid IFE CSID HW");
6073 return -EINVAL;
6074 }
6075
6076 cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr);
6077
6078 /* setup ife context list */
6079 INIT_LIST_HEAD(&g_ife_hw_mgr.free_ctx_list);
6080 INIT_LIST_HEAD(&g_ife_hw_mgr.used_ctx_list);
6081
6082 /*
6083 * for now, we only support one iommu handle. later
6084 * we will need to setup more iommu handle for other
6085 * use cases.
6086 * Also, we have to release them once we have the
6087 * deinit support
6088 */
6089 if (cam_smmu_get_handle("ife",
6090 &g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
6091 CAM_ERR(CAM_ISP, "Can not get iommu handle");
6092 return -EINVAL;
6093 }
6094
6095 if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
6096 CAM_SMMU_ATTACH)) {
6097 CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
6098 goto attach_fail;
6099 }
6100
6101 if (cam_smmu_get_handle("cam-secure",
6102 &g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure)) {
6103 CAM_ERR(CAM_ISP, "Failed to get secure iommu handle");
6104 goto secure_fail;
6105 }
6106
6107 CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]",
6108 g_ife_hw_mgr.mgr_common.img_iommu_hdl,
6109 g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
6110
6111 if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
6112 CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
6113 g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
6114 g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
6115 cdm_handles.secure;
6116 } else {
6117 CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
6118 g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
6119 g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
6120 }
6121
6122 atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0);
6123 for (i = 0; i < CAM_CTX_MAX; i++) {
6124 memset(&g_ife_hw_mgr.ctx_pool[i], 0,
6125 sizeof(g_ife_hw_mgr.ctx_pool[i]));
6126 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].list);
6127
6128 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in.list);
6129 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_cid);
6130 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_csid);
6131 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_src);
Raja Mallikfe46d932019-02-12 20:34:07 +05306132 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in_rd);
Raja Mallikc7e256f2018-12-06 17:36:28 +05306133 ctx_pool = &g_ife_hw_mgr.ctx_pool[i];
6134 for (j = 0; j < CAM_IFE_HW_OUT_RES_MAX; j++) {
6135 res_list_ife_out = &ctx_pool->res_list_ife_out[j];
6136 INIT_LIST_HEAD(&res_list_ife_out->list);
6137 }
6138
6139 /* init context pool */
6140 INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].free_res_list);
6141 for (j = 0; j < CAM_IFE_HW_RES_POOL_MAX; j++) {
6142 INIT_LIST_HEAD(
6143 &g_ife_hw_mgr.ctx_pool[i].res_pool[j].list);
6144 list_add_tail(
6145 &g_ife_hw_mgr.ctx_pool[i].res_pool[j].list,
6146 &g_ife_hw_mgr.ctx_pool[i].free_res_list);
6147 }
6148
6149 g_ife_hw_mgr.ctx_pool[i].cdm_cmd =
6150 kzalloc(((sizeof(struct cam_cdm_bl_request)) +
6151 ((CAM_IFE_HW_ENTRIES_MAX - 1) *
6152 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
6153 if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
6154 rc = -ENOMEM;
6155 CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
6156 goto end;
6157 }
6158
6159 g_ife_hw_mgr.ctx_pool[i].ctx_index = i;
6160 g_ife_hw_mgr.ctx_pool[i].hw_mgr = &g_ife_hw_mgr;
6161
6162 cam_tasklet_init(&g_ife_hw_mgr.mgr_common.tasklet_pool[i],
6163 &g_ife_hw_mgr.ctx_pool[i], i);
6164 g_ife_hw_mgr.ctx_pool[i].common.tasklet_info =
6165 g_ife_hw_mgr.mgr_common.tasklet_pool[i];
6166
6167
6168 init_completion(&g_ife_hw_mgr.ctx_pool[i].config_done_complete);
6169 list_add_tail(&g_ife_hw_mgr.ctx_pool[i].list,
6170 &g_ife_hw_mgr.free_ctx_list);
6171 }
6172
6173 /* Create Worker for ife_hw_mgr with 10 tasks */
6174 rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
6175 &g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0);
6176 if (rc < 0) {
6177 CAM_ERR(CAM_ISP, "Unable to create worker");
6178 goto end;
6179 }
6180
6181 /* fill return structure */
6182 hw_mgr_intf->hw_mgr_priv = &g_ife_hw_mgr;
6183 hw_mgr_intf->hw_get_caps = cam_ife_mgr_get_hw_caps;
6184 hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire;
6185 hw_mgr_intf->hw_start = cam_ife_mgr_start_hw;
6186 hw_mgr_intf->hw_stop = cam_ife_mgr_stop_hw;
6187 hw_mgr_intf->hw_read = cam_ife_mgr_read;
6188 hw_mgr_intf->hw_write = cam_ife_mgr_write;
6189 hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
6190 hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
6191 hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
6192 hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
Raja Mallike3ed1a32019-08-22 17:12:32 +05306193 hw_mgr_intf->hw_reset = cam_ife_mgr_reset;
Raja Mallikc7e256f2018-12-06 17:36:28 +05306194
6195 if (iommu_hdl)
6196 *iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
6197
6198 cam_ife_hw_mgr_debug_register();
6199 CAM_DBG(CAM_ISP, "Exit");
6200
6201 return 0;
6202end:
6203 if (rc) {
6204 for (i = 0; i < CAM_CTX_MAX; i++) {
6205 cam_tasklet_deinit(
6206 &g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
6207 kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
6208 g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL;
6209 g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL;
6210 }
6211 }
6212 cam_smmu_destroy_handle(
6213 g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
6214 g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
6215secure_fail:
6216 cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
6217 CAM_SMMU_DETACH);
6218attach_fail:
6219 cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl);
6220 g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
6221 return rc;
6222}