/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/iopoll.h>
14#include <linux/slab.h>
15#include <uapi/media/cam_isp.h>
16#include <uapi/media/cam_defs.h>
17
18#include "cam_ife_csid_core.h"
19#include "cam_isp_hw.h"
20#include "cam_soc_util.h"
21#include "cam_io_util.h"
22
23#undef CDBG
24#define CDBG(fmt, args...) pr_debug(fmt, ##args)
25
26
27/* Timeout value in msec */
28#define IFE_CSID_TIMEOUT 1000
29
30/* TPG VC/DT values */
31#define CAM_IFE_CSID_TPG_VC_VAL 0xA
32#define CAM_IFE_CSID_TPG_DT_VAL 0x2B
33
34/* Timeout values in usec */
35#define CAM_IFE_CSID_TIMEOUT_SLEEP_US 1000
36#define CAM_IFE_CSID_TIMEOUT_ALL_US 1000000
37
38static int cam_ife_csid_is_ipp_format_supported(
39 uint32_t decode_fmt)
40{
41 int rc = -EINVAL;
42
43 switch (decode_fmt) {
44 case CAM_FORMAT_MIPI_RAW_6:
45 case CAM_FORMAT_MIPI_RAW_8:
46 case CAM_FORMAT_MIPI_RAW_10:
47 case CAM_FORMAT_MIPI_RAW_12:
48 case CAM_FORMAT_MIPI_RAW_14:
49 case CAM_FORMAT_MIPI_RAW_16:
50 case CAM_FORMAT_MIPI_RAW_20:
51 case CAM_FORMAT_DPCM_10_6_10:
52 case CAM_FORMAT_DPCM_10_8_10:
53 case CAM_FORMAT_DPCM_12_6_12:
54 case CAM_FORMAT_DPCM_12_8_12:
55 case CAM_FORMAT_DPCM_14_8_14:
56 case CAM_FORMAT_DPCM_14_10_14:
57 rc = 0;
58 break;
59 default:
60 break;
61 }
62 return rc;
63}
64
65static int cam_ife_csid_get_format(uint32_t res_id,
66 uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
67{
68 int rc = 0;
69
70 if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
71 res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
72 *path_fmt = 0xf;
73 return 0;
74 }
75
76 switch (decode_fmt) {
77 case CAM_FORMAT_MIPI_RAW_6:
78 *path_fmt = 0;
79 *plain_fmt = 0;
80 break;
81 case CAM_FORMAT_MIPI_RAW_8:
82 *path_fmt = 1;
83 *plain_fmt = 0;
84 break;
85 case CAM_FORMAT_MIPI_RAW_10:
86 *path_fmt = 2;
87 *plain_fmt = 1;
88 break;
89 case CAM_FORMAT_MIPI_RAW_12:
90 *path_fmt = 3;
91 *plain_fmt = 1;
92 break;
93 case CAM_FORMAT_MIPI_RAW_14:
94 *path_fmt = 4;
95 *plain_fmt = 1;
96 break;
97 case CAM_FORMAT_MIPI_RAW_16:
98 *path_fmt = 5;
99 *plain_fmt = 1;
100 break;
101 case CAM_FORMAT_MIPI_RAW_20:
102 *path_fmt = 6;
103 *plain_fmt = 2;
104 break;
105 case CAM_FORMAT_DPCM_10_6_10:
106 *path_fmt = 7;
107 *plain_fmt = 1;
108 break;
109 case CAM_FORMAT_DPCM_10_8_10:
110 *path_fmt = 8;
111 *plain_fmt = 1;
112 break;
113 case CAM_FORMAT_DPCM_12_6_12:
114 *path_fmt = 9;
115 *plain_fmt = 1;
116 break;
117 case CAM_FORMAT_DPCM_12_8_12:
118 *path_fmt = 0xA;
119 *plain_fmt = 1;
120 break;
121 case CAM_FORMAT_DPCM_14_8_14:
122 *path_fmt = 0xB;
123 *plain_fmt = 1;
124 break;
125 case CAM_FORMAT_DPCM_14_10_14:
126 *path_fmt = 0xC;
127 *plain_fmt = 1;
128 break;
129 default:
130 pr_err("%s:%d:CSID:%d un supported format\n",
131 __func__, __LINE__, decode_fmt);
132 rc = -EINVAL;
133 }
134
135 return rc;
136}
137
138static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
139 struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
140 uint32_t res_type)
141{
142 int rc = 0;
143 struct cam_ife_csid_cid_data *cid_data;
144 uint32_t i = 0, j = 0;
145
146 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
147 if (csid_hw->cid_res[i].res_state >=
148 CAM_ISP_RESOURCE_STATE_RESERVED) {
149 cid_data = (struct cam_ife_csid_cid_data *)
150 csid_hw->cid_res[i].res_priv;
151 if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
152 if (cid_data->tpg_set) {
153 cid_data->cnt++;
154 *res = &csid_hw->cid_res[i];
155 break;
156 }
157 } else {
158 if (cid_data->vc == vc && cid_data->dt == dt) {
159 cid_data->cnt++;
160 *res = &csid_hw->cid_res[i];
161 break;
162 }
163 }
164 }
165 }
166
167 if (i == CAM_IFE_CSID_CID_RES_MAX) {
168 if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
169 pr_err("%s:%d:CSID:%d TPG CID not available\n",
170 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
171 rc = -EINVAL;
172 }
173
174 for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
175 if (csid_hw->cid_res[j].res_state ==
176 CAM_ISP_RESOURCE_STATE_AVAILABLE) {
177 cid_data = (struct cam_ife_csid_cid_data *)
178 csid_hw->cid_res[j].res_priv;
179 cid_data->vc = vc;
180 cid_data->dt = dt;
181 cid_data->cnt = 1;
182 csid_hw->cid_res[j].res_state =
183 CAM_ISP_RESOURCE_STATE_RESERVED;
184 *res = &csid_hw->cid_res[j];
185 CDBG("%s:%d:CSID:%d CID %d allocated\n",
186 __func__, __LINE__,
187 csid_hw->hw_intf->hw_idx,
188 csid_hw->cid_res[j].res_id);
189 break;
190 }
191 }
192
193 if (j == CAM_IFE_CSID_CID_RES_MAX) {
194 pr_err("%s:%d:CSID:%d Free cid is not available\n",
195 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
196 rc = -EINVAL;
197 }
198 }
199
200 return rc;
201}
202
203
/*
 * cam_ife_csid_global_reset() - perform a top-level reset of the CSID HW.
 *
 * Sequence:
 *   1. Save the RX / IPP / RDI IRQ mask registers (restored at the end).
 *   2. Mask all interrupts, then clear any pending ones.
 *   3. Program the top reset strobe and wait (up to IFE_CSID_TIMEOUT ms)
 *      for the IRQ handler to signal csid_top_complete.
 *   4. Restore the saved IRQ masks.
 *
 * @csid_hw: CSID hardware instance; must already be powered up.
 *
 * Return: 0 on success, -EINVAL if the HW is not powered up,
 *         -ETIMEDOUT if the reset-done interrupt never arrives.
 */
static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
{
	struct cam_hw_soc_info *soc_info;
	struct cam_ife_csid_reg_offset *csid_reg;
	int rc = 0;
	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = csid_hw->csid_info->csid_reg;

	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	init_completion(&csid_hw->csid_top_complete);

	/* Save interrupt mask registers values*/
	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	/* no_pix != 0 means this CSID variant has an IPP (pixel) path */
	if (csid_reg->cmn_reg->no_pix)
		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
	}

	/* Mask all interrupts */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);

	/* clear all interrupts */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);

	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);

	for (i = 0 ; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);

	/* latch the clear writes above via the IRQ command register */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	/* NOTE(review): magic 0x80 written to rx cfg1 before the reset
	 * strobe -- presumably a MISR/VC-mode related enable; confirm
	 * against the CSID register spec.
	 */
	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);

	/* enable the IPP and RDI format measure */
	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_cfg0_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);

	/* perform the top CSID HW reset */
	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
		__func__, __LINE__);

	/* wait_for_completion_timeout() returns 0 on timeout, >0 on wake */
	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
		if (rc == 0)
			rc = -ETIMEDOUT;
	} else {
		rc = 0;
	}

	/*restore all interrupt masks */
	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);

	return rc;
}
318
/*
 * cam_ife_csid_path_reset() - reset a single CSID path (IPP or an RDI).
 *
 * Enables the path's reset-done interrupt, kicks the per-path reset strobe
 * and waits (up to IFE_CSID_TIMEOUT ms) for the IRQ handler to signal the
 * path's completion object.
 *
 * @csid_hw: CSID hardware instance; must be powered up.
 * @reset:   reset arguments carrying the path resource node.
 *
 * Return: 0 on success, -EINVAL for bad state/resource,
 *         -ETIMEDOUT if the reset-done interrupt never arrives.
 */
static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_reset_cfg_args *reset)
{
	int rc = 0;
	struct cam_hw_soc_info *soc_info;
	struct cam_isp_resource_node *res;
	struct cam_ife_csid_reg_offset *csid_reg;
	uint32_t reset_strb_addr, reset_strb_val, val, id;
	struct completion *complete;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	res = reset->node_res;

	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_id);

	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		/* ipp_reg is NULL on CSID variants without a pixel path */
		if (!csid_reg->ipp_reg) {
			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
			return -EINVAL;
		}

		reset_strb_addr = csid_reg->ipp_reg->csid_ipp_rst_strobes_addr;
		complete = &csid_hw->csid_ipp_complete;

		/* Enable path reset done interrupt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
		val |= CSID_PATH_INFO_RST_DONE;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	} else {
		/* all non-IPP path ids index directly into the RDI arrays */
		id = res->res_id;
		if (!csid_reg->rdi_reg[id]) {
			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
			return -EINVAL;
		}

		reset_strb_addr =
			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
		complete =
			&csid_hw->csid_rdin_complete[id];

		/* Enable path reset done interrupt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
		val |= CSID_PATH_INFO_RST_DONE;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	}

	init_completion(complete);
	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;

	/* Enable the Test gen before reset */
	/* NOTE(review): tpg_reg is dereferenced unconditionally here --
	 * presumably every supported CSID variant has a TPG block; and the
	 * TPG is toggled around the strobe, apparently so the path sees
	 * clocked input during reset. Confirm both against the HW spec.
	 */
	cam_io_w_mb(1, csid_hw->hw_info->soc_info.reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_ctrl_addr);

	/* Reset the corresponding ife csid path */
	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
		reset_strb_addr);

	/* wait_for_completion_timeout() returns 0 on timeout, >0 on wake */
	rc = wait_for_completion_timeout(complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, rc);
		if (rc == 0)
			rc = -ETIMEDOUT;
	}

	/* Disable Test Gen after reset*/
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_ctrl_addr);

end:
	return rc;

}
419
420static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
421 struct cam_csid_hw_reserve_resource_args *cid_reserv)
422{
423 int rc = 0;
424 struct cam_ife_csid_cid_data *cid_data;
425
426 CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
427 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
428 cid_reserv->in_port->res_type,
429 cid_reserv->in_port->lane_type,
430 cid_reserv->in_port->lane_num,
431 cid_reserv->in_port->dt,
432 cid_reserv->in_port->vc);
433
434 if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
435 pr_err("%s:%d:CSID:%d Invalid phy sel %d\n", __func__,
436 __LINE__, csid_hw->hw_intf->hw_idx,
437 cid_reserv->in_port->res_type);
438 rc = -EINVAL;
439 goto end;
440 }
441
442 if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
443 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
444 pr_err("%s:%d:CSID:%d Invalid lane type %d\n", __func__,
445 __LINE__, csid_hw->hw_intf->hw_idx,
446 cid_reserv->in_port->lane_type);
447 rc = -EINVAL;
448 goto end;
449 }
450
451 if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY &&
452 cid_reserv->in_port->lane_num > 4) &&
453 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
454 pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
455 __LINE__, csid_hw->hw_intf->hw_idx,
456 cid_reserv->in_port->lane_num);
457 rc = -EINVAL;
458 goto end;
459 }
460 if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
461 cid_reserv->in_port->lane_num > 3) &&
462 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
463 pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
464 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
465 cid_reserv->in_port->lane_type,
466 cid_reserv->in_port->lane_num);
467 rc = -EINVAL;
468 goto end;
469 }
470
471 /* CSID CSI2 v2.0 supports 31 vc */
472 if (cid_reserv->in_port->dt > 0x3f ||
473 cid_reserv->in_port->vc > 0x1f) {
474 pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
475 __LINE__, csid_hw->hw_intf->hw_idx,
476 cid_reserv->in_port->vc, cid_reserv->in_port->dt);
477 rc = -EINVAL;
478 goto end;
479 }
480
481 if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
482 (cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
483 cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
484 pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
485 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
486 cid_reserv->in_port->format);
487 rc = -EINVAL;
488 goto end;
489 }
490
491 if (csid_hw->csi2_reserve_cnt) {
492 /* current configure res type should match requested res type */
493 if (csid_hw->res_type != cid_reserv->in_port->res_type) {
494 rc = -EINVAL;
495 goto end;
496 }
497
498 if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
499 if (csid_hw->csi2_rx_cfg.lane_cfg !=
500 cid_reserv->in_port->lane_cfg ||
501 csid_hw->csi2_rx_cfg.lane_type !=
502 cid_reserv->in_port->lane_type ||
503 csid_hw->csi2_rx_cfg.lane_num !=
504 cid_reserv->in_port->lane_num) {
505 rc = -EINVAL;
506 goto end;
507 }
508 } else {
509 if (csid_hw->tpg_cfg.decode_fmt !=
510 cid_reserv->in_port->format ||
511 csid_hw->tpg_cfg.width !=
512 cid_reserv->in_port->left_width ||
513 csid_hw->tpg_cfg.height !=
514 cid_reserv->in_port->height ||
515 csid_hw->tpg_cfg.test_pattern !=
516 cid_reserv->in_port->test_pattern) {
517 rc = -EINVAL;
518 goto end;
519 }
520 }
521 }
522
523 if (!csid_hw->csi2_reserve_cnt) {
524 csid_hw->res_type = cid_reserv->in_port->res_type;
525 /* Take the first CID resource*/
526 csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
527 cid_data = (struct cam_ife_csid_cid_data *)
528 csid_hw->cid_res[0].res_priv;
529
530 csid_hw->csi2_rx_cfg.lane_cfg =
531 cid_reserv->in_port->lane_cfg;
532 csid_hw->csi2_rx_cfg.lane_type =
533 cid_reserv->in_port->lane_type;
534 csid_hw->csi2_rx_cfg.lane_num =
535 cid_reserv->in_port->lane_num;
536
537 if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
538 csid_hw->csi2_rx_cfg.phy_sel = 0;
539 if (cid_reserv->in_port->format >
540 CAM_FORMAT_MIPI_RAW_16) {
541 pr_err("%s:%d: Wrong TPG format\n", __func__,
542 __LINE__);
543 rc = -EINVAL;
544 goto end;
545 }
546 csid_hw->tpg_cfg.decode_fmt =
547 cid_reserv->in_port->format;
548 csid_hw->tpg_cfg.width =
549 cid_reserv->in_port->left_width;
550 csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
551 csid_hw->tpg_cfg.test_pattern =
552 cid_reserv->in_port->test_pattern;
553 cid_data->tpg_set = 1;
554 } else {
555 csid_hw->csi2_rx_cfg.phy_sel =
556 (cid_reserv->in_port->res_type & 0xFF) - 1;
557 }
558
559 cid_data->vc = cid_reserv->in_port->vc;
560 cid_data->dt = cid_reserv->in_port->dt;
561 cid_data->cnt = 1;
562 cid_reserv->node_res = &csid_hw->cid_res[0];
563 csid_hw->csi2_reserve_cnt++;
564
565 CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
566 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
567 cid_reserv->node_res->res_id);
568 } else {
569 rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
570 cid_reserv->in_port->vc, cid_reserv->in_port->dt,
571 cid_reserv->in_port->res_type);
572 /* if success then increment the reserve count */
573 if (!rc) {
574 if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
575 pr_err("%s:%d:CSID%d reserve cnt reached max\n",
576 __func__, __LINE__,
577 csid_hw->hw_intf->hw_idx);
578 rc = -EINVAL;
579 } else {
580 csid_hw->csi2_reserve_cnt++;
581 CDBG("%s:%d:CSID:%d CID:%d acquired\n",
582 __func__, __LINE__,
583 csid_hw->hw_intf->hw_idx,
584 cid_reserv->node_res->res_id);
585 }
586 }
587 }
588
589end:
590 return rc;
591}
592
593
/*
 * cam_ife_csid_path_reserve() - reserve a CSID path (IPP or RDI) resource
 * and populate its per-path configuration.
 *
 * Validates vc/dt/sync-mode, checks the requested path is AVAILABLE, then
 * marks it RESERVED and fills its cam_ife_csid_path_cfg (format, crop,
 * master/slave info). TPG input forces the fixed TPG vc/dt values.
 *
 * @csid_hw: CSID hardware instance.
 * @reserve: reserve arguments; node_res is set on success.
 *
 * Return: 0 on success, -EINVAL on invalid request or busy resource.
 */
static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_hw_reserve_resource_args *reserve)
{
	int rc = 0;
	struct cam_ife_csid_path_cfg *path_data;
	struct cam_isp_resource_node *res;

	/* CSID CSI2 v2.0 supports 31 vc */
	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			reserve->in_port->vc, reserve->in_port->dt,
			reserve->sync_mode);
		rc = -EINVAL;
		goto end;
	}

	switch (reserve->res_id) {
	case CAM_IFE_PIX_PATH_RES_IPP:
		if (csid_hw->ipp_res.res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
			CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				csid_hw->ipp_res.res_state);
			rc = -EINVAL;
			goto end;
		}

		/* returns 0 when the format IS supported */
		if (cam_ife_csid_is_ipp_format_supported(
				reserve->in_port->format)) {
			pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
				__func__, __LINE__,
				csid_hw->hw_intf->hw_idx, reserve->res_id,
				reserve->in_port->format);
			rc = -EINVAL;
			goto end;
		}

		/* assign the IPP resource */
		res = &csid_hw->ipp_res;
		CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
			__func__, __LINE__,
			csid_hw->hw_intf->hw_idx, res->res_id);

		break;
	case CAM_IFE_PIX_PATH_RES_RDI_0:
	case CAM_IFE_PIX_PATH_RES_RDI_1:
	case CAM_IFE_PIX_PATH_RES_RDI_2:
	case CAM_IFE_PIX_PATH_RES_RDI_3:
		/* NOTE(review): res_id is used directly as the rdi_res[]
		 * index -- assumes the RDI res_id enum values match array
		 * slots 0..3; confirm against cam_ife_csid_core.h.
		 */
		if (csid_hw->rdi_res[reserve->res_id].res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
			CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				reserve->res_id,
				csid_hw->rdi_res[reserve->res_id].res_state);
			rc = -EINVAL;
			goto end;
		} else {
			res = &csid_hw->rdi_res[reserve->res_id];
			CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
		}

		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
			__func__, __LINE__,
			csid_hw->hw_intf->hw_idx, reserve->res_id);
		rc = -EINVAL;
		goto end;
	}

	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	path_data = (struct cam_ife_csid_path_cfg *)res->res_priv;

	path_data->cid = reserve->cid;
	path_data->decode_fmt = reserve->in_port->format;
	path_data->master_idx = reserve->master_idx;
	path_data->sync_mode = reserve->sync_mode;
	path_data->height = reserve->in_port->height;
	path_data->start_line = reserve->in_port->line_start;
	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
		/* TPG always streams on the fixed TPG vc/dt */
		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
	} else {
		path_data->dt = reserve->in_port->dt;
		path_data->vc = reserve->in_port->vc;
	}

	/* dual-IFE: master crops the left half, slave the right half */
	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
		path_data->crop_enable = 1;
		path_data->start_pixel = reserve->in_port->left_start;
		path_data->width = reserve->in_port->left_width;
	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
		path_data->crop_enable = 1;
		path_data->start_pixel = reserve->in_port->right_start;
		path_data->width = reserve->in_port->right_width;
	} else
		path_data->crop_enable = 0;

	reserve->node_res = res;

end:
	return rc;
}
701
/*
 * cam_ife_csid_enable_hw() - power up and initialize the CSID HW.
 *
 * Reference-counted via hw_info->open_count: only the first caller enables
 * SOC resources, performs the HW reset, the SW register reset (polled, since
 * the SW reset also clears IRQ masks), clears all interrupts and re-enables
 * the top IRQ.
 *
 * @csid_hw: CSID hardware instance.
 *
 * Return: 0 on success; -EINVAL on count overflow or SOC enable failure,
 *         -ETIMEDOUT if either reset does not complete.
 */
static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw *csid_hw)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	uint32_t i, status, val;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	/* overflow check before increment */
	if (csid_hw->hw_info->open_count == UINT_MAX) {
		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	/* Increment ref Count */
	csid_hw->hw_info->open_count++;
	if (csid_hw->hw_info->open_count > 1) {
		CDBG("%s:%d: CSID hw has already been enabled\n",
			__func__, __LINE__);
		return rc;
	}

	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	rc = cam_ife_csid_enable_soc_resources(soc_info);
	if (rc) {
		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
			csid_hw->hw_intf->hw_idx);
		goto err;
	}


	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
	/* Enable the top IRQ interrupt */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);

	rc = cam_ife_csid_global_reset(csid_hw);
	if (rc) {
		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
		rc = -ETIMEDOUT;
		goto disable_soc;
	}

	/*
	 * Reset the SW registers
	 * SW register reset also reset the mask irq, so poll the irq status
	 * to check the reset complete.
	 */
	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	/* poll top IRQ status bit 0 (reset done) since masks are cleared */
	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_status_addr,
		status, (status & 0x1) == 0x1,
		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
	if (rc < 0) {
		pr_err("%s:%d: software register reset timeout.....\n",
			__func__, __LINE__);
		rc = -ETIMEDOUT;
		goto disable_soc;
	}

	/* clear all interrupts */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);

	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);

	/* latch the clear writes via the IRQ command register */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	/* Enable the top IRQ interrupt */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);

	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_hw_version_addr);
	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, val);

	return 0;

disable_soc:
	cam_ife_csid_disable_soc_resources(soc_info);
	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
err:
	/* undo the ref-count increment taken above */
	csid_hw->hw_info->open_count--;
	return rc;
}
816
817static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
818{
819 int rc = 0;
820 struct cam_hw_soc_info *soc_info;
821 struct cam_ife_csid_reg_offset *csid_reg;
822
823
824 /* Decrement ref Count */
825 if (csid_hw->hw_info->open_count)
826 csid_hw->hw_info->open_count--;
827 if (csid_hw->hw_info->open_count)
828 return rc;
829
830 soc_info = &csid_hw->hw_info->soc_info;
831 csid_reg = csid_hw->csid_info->csid_reg;
832
833 CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
834 csid_hw->hw_intf->hw_idx);
835
836 /*disable the top IRQ interrupt */
837 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
838 csid_reg->cmn_reg->csid_top_irq_mask_addr);
839
840 rc = cam_ife_csid_disable_soc_resources(soc_info);
841 if (rc)
842 pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
843 __LINE__, csid_hw->hw_intf->hw_idx);
844
845 csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
846 return rc;
847}
848
849
/*
 * cam_ife_csid_tpg_start() - start the CSID test pattern generator.
 *
 * Reference-counted via tpg_start_cnt: only the first caller programs the
 * TPG control register. Also dumps TPG/IPP/RX register banks via CDBG for
 * bring-up debugging.
 *
 * @csid_hw: CSID hardware instance.
 * @res:     resource node (currently unused by this function).
 *
 * Return: always 0.
 */
static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	uint32_t val = 0;
	struct cam_hw_soc_info *soc_info;

	csid_hw->tpg_start_cnt++;
	if (csid_hw->tpg_start_cnt == 1) {
		/*Enable the TPG */
		CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);

		soc_info = &csid_hw->hw_info->soc_info;
		/* Debug-only register dumps; hard-coded bank offsets
		 * (0x600 TPG, 0x200 IPP, 0x100 RX) -- presumably matching
		 * this CSID version's register layout; TODO confirm.
		 * The inner 'val' intentionally shadows the outer one.
		 */
		{
			uint32_t val;
			uint32_t i;
			uint32_t base = 0x600;

			CDBG("%s:%d: ================== TPG ===============\n",
				__func__, __LINE__);
			for (i = 0; i < 16; i++) {
				val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					base + i * 4);
				CDBG("%s:%d reg 0x%x = 0x%x\n",
					__func__, __LINE__,
					(base + i*4), val);
			}

			CDBG("%s:%d: ================== IPP ===============\n",
				__func__, __LINE__);
			base = 0x200;
			for (i = 0; i < 10; i++) {
				val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					base + i * 4);
				CDBG("%s:%d reg 0x%x = 0x%x\n",
					__func__, __LINE__,
					(base + i*4), val);
			}

			CDBG("%s:%d: ================== RX ===============\n",
				__func__, __LINE__);
			base = 0x100;
			for (i = 0; i < 5; i++) {
				val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					base + i * 4);
				CDBG("%s:%d reg 0x%x = 0x%x\n",
					__func__, __LINE__,
					(base + i*4), val);
			}
		}

		CDBG("%s:%d: =============== TPG control ===============\n",
			__func__, __LINE__);
		/* NOTE(review): magic TPG control fields below (4 << 20,
		 * 0x80 << 8, low bits 7) are undocumented here -- lane_num
		 * is programmed into bits [5:4]; confirm the rest against
		 * the TPG register spec.
		 */
		val = (4 << 20);
		val |= (0x80 << 8);
		val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
		val |= 7;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_hw->csid_info->csid_reg->tpg_reg->
			csid_tpg_ctrl_addr);

		val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
		CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
			0x600, val);
	}

	return 0;
}
921
922static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw *csid_hw,
923 struct cam_isp_resource_node *res)
924{
925 struct cam_hw_soc_info *soc_info;
926
927 if (csid_hw->tpg_start_cnt)
928 csid_hw->tpg_start_cnt--;
929
930 if (csid_hw->tpg_start_cnt)
931 return 0;
932
933 soc_info = &csid_hw->hw_info->soc_info;
934
935 /* disable the TPG */
936 if (!csid_hw->tpg_start_cnt) {
937 CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
938 __LINE__, csid_hw->hw_intf->hw_idx);
939
940 /*stop the TPG */
941 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
942 csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
943 }
944
945 return 0;
946}
947
948
/*
 * cam_ife_csid_config_tpg() - program the test pattern generator from the
 * cached csid_hw->tpg_cfg (set at CID reserve time).
 *
 * Configures VC/DT, blanking, LFSR seed, frame geometry, decode format and
 * the colour-bar pattern.
 *
 * @csid_hw: CSID hardware instance.
 * @res:     resource node (currently unused by this function).
 *
 * Return: always 0.
 */
static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	uint32_t val = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	CDBG("%s:%d CSID:%d TPG config\n", __func__,
		__LINE__, csid_hw->hw_intf->hw_idx);

	/* configure one DT, infinite frames */
	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);

	/* vertical blanking count = 0x740, horzontal blanking count = 0x740*/
	val = (0x740 << 12) | 0x740;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);

	/* arbitrary fixed seed for the pseudo-random pattern generator */
	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);

	/* frame geometry: width in the high half-word, height in the low */
	val = csid_hw->tpg_cfg.width << 16 |
		csid_hw->tpg_cfg.height;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);

	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);

	/*
	 * decode_fmt is the same as the input resource format.
	 * it is one larger than the register spec format.
	 */
	val = ((csid_hw->tpg_cfg.decode_fmt - 1) << 16) | 0x8;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);

	/* select rotate period as 5 frame */
	val = 5 << 8;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
	/* config pix pattern */
	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
		soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);

	return 0;
}
1002
/*
 * cam_ife_csid_enable_csi2() - enable and configure the CSI2 RX block.
 *
 * Reference-counted via csi2_cfg_cnt: only the first caller programs the
 * RX cfg registers, optionally configures the TPG (for TPG input), and
 * unmasks the RX error/reset interrupts. The CID resource is moved to
 * STREAMING state for every caller.
 *
 * @csid_hw: CSID hardware instance.
 * @res:     CID resource node (res_priv holds the cid_data for vc lookup).
 *
 * Return: 0 on success, -EINVAL on counter overflow or TPG config failure.
 */
static int cam_ife_csid_enable_csi2(
	struct cam_ife_csid_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	struct cam_ife_csid_cid_data *cid_data;
	uint32_t val = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);

	/* overflow check before increment */
	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
	csid_hw->csi2_cfg_cnt++;
	/* HW already configured by an earlier caller */
	if (csid_hw->csi2_cfg_cnt > 1)
		return rc;

	/* rx cfg0: lane count (0-based), lane mapping, lane type, phy sel */
	val = (csid_hw->csi2_rx_cfg.lane_num - 1) |
		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
		(csid_hw->csi2_rx_cfg.lane_type << 24);
	val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);

	/* rx cfg1*/
	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
	/* if VC value is more than 3 than set full width of VC */
	if (cid_data->vc > 3)
		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);

	/* enable packet ecc correction */
	val |= 1;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);

	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
		/* Config the TPG */
		rc = cam_ife_csid_config_tpg(csid_hw, res);
		if (rc) {
			/* roll the resource back out of STREAMING */
			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
			return rc;
		}
	}

	/*Enable the CSI2 rx inerrupts */
	val = CSID_CSI2_RX_INFO_RST_DONE |
		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	return 0;
}
1075
1076static int cam_ife_csid_disable_csi2(
1077 struct cam_ife_csid_hw *csid_hw,
1078 struct cam_isp_resource_node *res)
1079{
1080 struct cam_ife_csid_reg_offset *csid_reg;
1081 struct cam_hw_soc_info *soc_info;
1082
1083 if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
1084 pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
1085 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1086 return -EINVAL;
1087 }
1088
1089 csid_reg = csid_hw->csid_info->csid_reg;
1090 soc_info = &csid_hw->hw_info->soc_info;
1091 CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
1092 __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
1093
1094 if (csid_hw->csi2_cfg_cnt)
1095 csid_hw->csi2_cfg_cnt--;
1096
1097 if (csid_hw->csi2_cfg_cnt)
1098 return 0;
1099
1100 /*Disable the CSI2 rx inerrupts */
1101 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1102 csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
1103
1104 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1105
1106 return 0;
1107}
1108
1109static int cam_ife_csid_init_config_ipp_path(
1110 struct cam_ife_csid_hw *csid_hw,
1111 struct cam_isp_resource_node *res)
1112{
1113 int rc = 0;
1114 struct cam_ife_csid_path_cfg *path_data;
1115 struct cam_ife_csid_reg_offset *csid_reg;
1116 struct cam_hw_soc_info *soc_info;
1117 uint32_t path_format = 0, plain_format = 0, val = 0;
1118
1119 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1120 csid_reg = csid_hw->csid_info->csid_reg;
1121 soc_info = &csid_hw->hw_info->soc_info;
1122
1123 if (!csid_reg->ipp_reg) {
1124 pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
1125 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1126 res->res_id);
1127 return -EINVAL;
1128 }
1129
1130 CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
1131 rc = cam_ife_csid_get_format(res->res_id,
1132 path_data->decode_fmt, &path_format, &plain_format);
1133 if (rc)
1134 return rc;
1135
1136 /**
1137 * configure the IPP and enable the time stamp capture.
1138 * enable the HW measrurement blocks
1139 */
1140 val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
1141 (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
1142 (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
1143 (path_format << csid_reg->cmn_reg->fmt_shift_val) |
1144 (path_data->crop_enable & 1 <<
1145 csid_reg->cmn_reg->crop_h_en_shift_val) |
1146 (path_data->crop_enable & 1 <<
1147 csid_reg->cmn_reg->crop_v_en_shift_val) |
1148 (1 << 1) | 1;
1149 val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
1150 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1151 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1152
1153 if (path_data->crop_enable) {
1154 val = ((path_data->width +
1155 path_data->start_pixel) & 0xFFFF <<
1156 csid_reg->cmn_reg->crop_shift) |
1157 (path_data->start_pixel & 0xFFFF);
1158
1159 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1160 csid_reg->ipp_reg->csid_ipp_hcrop_addr);
1161
1162 val = ((path_data->height +
1163 path_data->start_line) & 0xFFFF <<
1164 csid_reg->cmn_reg->crop_shift) |
1165 (path_data->start_line & 0xFFFF);
1166
1167 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1168 csid_reg->ipp_reg->csid_ipp_vcrop_addr);
1169 }
1170
1171 /* set frame drop pattern to 0 and period to 1 */
1172 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1173 csid_reg->ipp_reg->csid_ipp_frm_drop_period_addr);
1174 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1175 csid_reg->ipp_reg->csid_ipp_frm_drop_pattern_addr);
1176 /* set irq sub sample pattern to 0 and period to 1 */
1177 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1178 csid_reg->ipp_reg->csid_ipp_irq_subsample_period_addr);
1179 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1180 csid_reg->ipp_reg->csid_ipp_irq_subsample_pattern_addr);
1181 /* set pixel drop pattern to 0 and period to 1 */
1182 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1183 csid_reg->ipp_reg->csid_ipp_pix_drop_pattern_addr);
1184 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1185 csid_reg->ipp_reg->csid_ipp_pix_drop_period_addr);
1186 /* set line drop pattern to 0 and period to 1 */
1187 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1188 csid_reg->ipp_reg->csid_ipp_line_drop_pattern_addr);
1189 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1190 csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
1191
1192 /*Set master or slave IPP */
1193 if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
1194 /*Set halt mode as master */
1195 val = CSID_HALT_MODE_MASTER << 2;
1196 else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
1197 /*Set halt mode as slave and set master idx */
1198 val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
1199 else
1200 /* Default is internal halt mode */
1201 val = 0;
1202
1203 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1204 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1205
1206 /* Enable the IPP path */
1207 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1208 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1209 val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
1210 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1211 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1212
1213 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1214
1215 return rc;
1216}
1217
1218static int cam_ife_csid_deinit_ipp_path(
1219 struct cam_ife_csid_hw *csid_hw,
1220 struct cam_isp_resource_node *res)
1221{
1222 int rc = 0;
1223 struct cam_ife_csid_reg_offset *csid_reg;
1224 struct cam_hw_soc_info *soc_info;
1225 uint32_t val = 0;
1226
1227 csid_reg = csid_hw->csid_info->csid_reg;
1228 soc_info = &csid_hw->hw_info->soc_info;
1229
1230 if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
1231 pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
1232 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1233 res->res_type, res->res_id, res->res_state);
1234 rc = -EINVAL;
1235 }
1236
1237 if (!csid_reg->ipp_reg) {
1238 pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
1239 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1240 res->res_id);
1241 rc = -EINVAL;
1242 }
1243
1244 /* Disable the IPP path */
1245 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1246 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1247 val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
1248 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1249 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1250
1251 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1252 return rc;
1253}
1254
/*
 * Start streaming on an already-initialised IPP path.
 *
 * Issues the resume-at-frame-boundary command (master and standalone
 * paths only; a slave path resumes implicitly with its master) and
 * unmasks the IPP interrupts. Moves the resource INIT_HW -> STREAMING.
 *
 * Returns 0 on success or -EINVAL on bad state / unsupported HW.
 */
static int cam_ife_csid_enable_ipp_path(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info         *soc_info;
	struct cam_ife_csid_path_cfg   *path_data;
	uint32_t val = 0;

	path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		return -EINVAL;
	}

	if (!csid_reg->ipp_reg) {
		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id);
		return -EINVAL;
	}

	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);

	/* Resume at frame boundary */
	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
		/* master: preserve the halt-mode/master-idx bits */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	}
	/* for slave mode, not need to resume for slave device */

	/* Enable the required ipp interrupts */
	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
		CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return 0;
}
1308
1309static int cam_ife_csid_disable_ipp_path(
1310 struct cam_ife_csid_hw *csid_hw,
1311 struct cam_isp_resource_node *res,
1312 enum cam_ife_csid_halt_cmd stop_cmd)
1313{
1314 int rc = 0;
1315 struct cam_ife_csid_reg_offset *csid_reg;
1316 struct cam_hw_soc_info *soc_info;
1317 struct cam_ife_csid_path_cfg *path_data;
1318 uint32_t val = 0;
1319
1320 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1321 csid_reg = csid_hw->csid_info->csid_reg;
1322 soc_info = &csid_hw->hw_info->soc_info;
1323
1324 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
1325 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1326 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1327 return -EINVAL;
1328 }
1329
1330 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1331 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1332 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1333 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1334 res->res_id, res->res_state);
1335 return rc;
1336 }
1337
1338 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1339 CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
1340 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1341 res->res_state);
1342 return -EINVAL;
1343 }
1344
1345 if (!csid_reg->ipp_reg) {
1346 pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
1347 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1348 return -EINVAL;
1349 }
1350
1351 if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
1352 stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1353 pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
1354 __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
1355 return -EINVAL;
1356 }
1357
1358 CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
1359 csid_hw->hw_intf->hw_idx, res->res_id);
1360
1361 if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
1362 /* configure Halt */
1363 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1364 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1365 val &= ~0x3;
1366 val |= stop_cmd;
1367 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1368 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1369 } else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
1370 cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
1371 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1372
1373 /* For slave mode, halt command should take it from master */
1374
1375 /* Enable the EOF interrupt for resume at boundary case */
1376 if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1377 init_completion(&csid_hw->csid_ipp_complete);
1378 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1379 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1380 val |= CSID_PATH_INFO_INPUT_EOF;
1381 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1382 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1383 } else {
1384 val &= ~(CSID_PATH_INFO_RST_DONE |
1385 CSID_PATH_ERROR_FIFO_OVERFLOW);
1386 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1387 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1388 }
1389
1390 return rc;
1391}
1392
1393
1394static int cam_ife_csid_init_config_rdi_path(
1395 struct cam_ife_csid_hw *csid_hw,
1396 struct cam_isp_resource_node *res)
1397{
1398 int rc = 0;
1399 struct cam_ife_csid_path_cfg *path_data;
1400 struct cam_ife_csid_reg_offset *csid_reg;
1401 struct cam_hw_soc_info *soc_info;
1402 uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
1403
1404 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1405 csid_reg = csid_hw->csid_info->csid_reg;
1406 soc_info = &csid_hw->hw_info->soc_info;
1407
1408 id = res->res_id;
1409 if (!csid_reg->rdi_reg[id]) {
1410 pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
1411 __func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
1412 return -EINVAL;
1413 }
1414
1415 rc = cam_ife_csid_get_format(res->res_id,
1416 path_data->decode_fmt, &path_format, &plain_fmt);
1417 if (rc)
1418 return rc;
1419
1420 /**
1421 * RDI path config and enable the time stamp capture
1422 * Enable the measurement blocks
1423 */
1424 val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
1425 (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
1426 (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
1427 (path_format << csid_reg->cmn_reg->fmt_shift_val) |
1428 (plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
1429 (path_data->crop_enable & 1 <<
1430 csid_reg->cmn_reg->crop_h_en_shift_val) |
1431 (path_data->crop_enable & 1 <<
1432 csid_reg->cmn_reg->crop_v_en_shift_val) |
1433 (1 << 2) | 3;
1434
1435 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1436 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1437
1438 if (path_data->crop_enable) {
1439 val = ((path_data->width +
1440 path_data->start_pixel) & 0xFFFF <<
1441 csid_reg->cmn_reg->crop_shift) |
1442 (path_data->start_pixel & 0xFFFF);
1443
1444 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1445 csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
1446
1447 val = ((path_data->height +
1448 path_data->start_line) & 0xFFFF <<
1449 csid_reg->cmn_reg->crop_shift) |
1450 (path_data->start_line & 0xFFFF);
1451
1452 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1453 csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
1454 }
1455 /* set frame drop pattern to 0 and period to 1 */
1456 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1457 csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
1458 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1459 csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
1460 /* set IRQ sum sabmple */
1461 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1462 csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
1463 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1464 csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
1465
1466 /* set pixel drop pattern to 0 and period to 1 */
1467 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1468 csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
1469 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1470 csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
1471 /* set line drop pattern to 0 and period to 1 */
1472 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1473 csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
1474 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1475 csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
1476
1477 /* Configure the halt mode */
1478 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1479 csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
1480
1481 /* Enable the RPP path */
1482 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1483 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1484 val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
1485 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1486 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1487
1488 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1489
1490 return rc;
1491}
1492
1493static int cam_ife_csid_deinit_rdi_path(
1494 struct cam_ife_csid_hw *csid_hw,
1495 struct cam_isp_resource_node *res)
1496{
1497 int rc = 0;
1498 struct cam_ife_csid_reg_offset *csid_reg;
1499 struct cam_hw_soc_info *soc_info;
1500 uint32_t val = 0, id;
1501
1502 csid_reg = csid_hw->csid_info->csid_reg;
1503 soc_info = &csid_hw->hw_info->soc_info;
1504 id = res->res_id;
1505
1506 if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
1507 res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
1508 !csid_reg->rdi_reg[id]) {
1509 pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
1510 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1511 res->res_state);
1512 return -EINVAL;
1513 }
1514
1515 /* Disable the RDI path */
1516 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1517 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1518 val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
1519 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1520 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1521
1522 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1523 return rc;
1524}
1525
/*
 * Start streaming on an already-initialised RDI path: issue the
 * resume-at-frame-boundary command and unmask the RDI interrupts.
 * Moves the resource INIT_HW -> STREAMING.
 *
 * Returns 0 on success or -EINVAL on bad state/id/unsupported RDI.
 */
static int cam_ife_csid_enable_rdi_path(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info         *soc_info;
	uint32_t id, val;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	id = res->res_id;

	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
		!csid_reg->rdi_reg[id]) {
		pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		return -EINVAL;
	}

	/* resume at frame boundary */
	cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);

	/* Enable the required RDI interrupts */
	val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
		CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return 0;
}
1562
1563
/*
 * Halt an RDI path, either immediately or at the next frame boundary.
 *
 * For frame-boundary halts the EOF interrupt is unmasked so the IRQ
 * handler can signal csid_rdin_complete[id], which
 * cam_ife_csid_res_wait_for_halt() waits on.
 *
 * Returns 0 on success (or if already stopped), negative errno otherwise.
 */
static int cam_ife_csid_disable_rdi_path(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res,
	enum cam_ife_csid_halt_cmd                stop_cmd)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset       *csid_reg;
	struct cam_hw_soc_info               *soc_info;
	uint32_t val = 0, id;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	id = res->res_id;

	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
		!csid_reg->rdi_reg[res->res_id]) {
		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
		return -EINVAL;
	}

	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, res->res_state);
		return rc;
	}

	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
		CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
			res->res_state);
		return -EINVAL;
	}

	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
		pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
		return -EINVAL;
	}


	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_id);

	/*
	 * NOTE(review): the completion is initialised even for
	 * HALT_IMMEDIATELY, where nobody will wait on it — harmless but
	 * arguably belongs inside the branch below; confirm.
	 */
	init_completion(&csid_hw->csid_rdin_complete[id]);

	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
		/* unmask EOF so the halt can be signalled at frame boundary */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
		val |= CSID_PATH_INFO_INPUT_EOF;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	} else {
		/*
		 * NOTE(review): 'val' is still 0 here, so this clears the
		 * ENTIRE RDI IRQ mask rather than only the RST_DONE/
		 * FIFO_OVERFLOW bits named below (the current mask is never
		 * read back). Compare with the IPP disable path — confirm
		 * whether masking everything is the intended behaviour.
		 */
		val &= ~(CSID_PATH_INFO_RST_DONE |
				CSID_PATH_ERROR_FIFO_OVERFLOW);
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	}

	/* Halt the RDI path */
	cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);

	return rc;
}
1632
1633static int cam_ife_csid_get_time_stamp(
1634 struct cam_ife_csid_hw *csid_hw, void *cmd_args)
1635{
1636 struct cam_csid_get_time_stamp_args *time_stamp;
1637 struct cam_isp_resource_node *res;
1638 struct cam_ife_csid_reg_offset *csid_reg;
1639 struct cam_hw_soc_info *soc_info;
1640 uint32_t time_32, id;
1641
1642 time_stamp = (struct cam_csid_get_time_stamp_args *)cmd_args;
1643 res = time_stamp->node_res;
1644 csid_reg = csid_hw->csid_info->csid_reg;
1645 soc_info = &csid_hw->hw_info->soc_info;
1646
1647 if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
1648 res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
1649 CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
1650 __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
1651 res->res_id);
1652 return -EINVAL;
1653 }
1654
1655 if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
1656 pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
1657 __LINE__, csid_hw->hw_intf->hw_idx,
1658 csid_hw->hw_info->hw_state);
1659 return -EINVAL;
1660 }
1661
1662 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
1663 time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1664 csid_reg->ipp_reg->csid_ipp_timestamp_curr1_sof_addr);
1665 time_stamp->time_stamp_val = time_32;
1666 time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
1667 time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1668 csid_reg->ipp_reg->csid_ipp_timestamp_curr0_sof_addr);
1669 time_stamp->time_stamp_val |= time_32;
1670 } else {
1671 id = res->res_id;
1672 time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1673 csid_reg->rdi_reg[id]->
1674 csid_rdi_timestamp_curr1_sof_addr);
1675 time_stamp->time_stamp_val = time_32;
1676 time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
1677
1678 time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1679 csid_reg->rdi_reg[id]->
1680 csid_rdi_timestamp_curr0_sof_addr);
1681 time_stamp->time_stamp_val |= time_32;
1682 }
1683
1684 return 0;
1685}
1686static int cam_ife_csid_res_wait_for_halt(
1687 struct cam_ife_csid_hw *csid_hw,
1688 struct cam_isp_resource_node *res)
1689{
1690 int rc = 0;
1691 struct cam_ife_csid_reg_offset *csid_reg;
1692 struct cam_hw_soc_info *soc_info;
1693
1694 struct completion *complete;
1695 uint32_t val = 0, id;
1696
1697 csid_reg = csid_hw->csid_info->csid_reg;
1698 soc_info = &csid_hw->hw_info->soc_info;
1699
1700 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
1701 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1702 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1703 return -EINVAL;
1704 }
1705
1706 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1707 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1708 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1709 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1710 res->res_id, res->res_state);
1711 return rc;
1712 }
1713
1714 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1715 CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
1716 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1717 res->res_state);
1718 return -EINVAL;
1719 }
1720
1721 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
1722 complete = &csid_hw->csid_ipp_complete;
1723 else
1724 complete = &csid_hw->csid_rdin_complete[res->res_id];
1725
1726 rc = wait_for_completion_timeout(complete,
1727 msecs_to_jiffies(IFE_CSID_TIMEOUT));
1728 if (rc <= 0) {
1729 pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
1730 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1731 res->res_id, rc);
1732 if (rc == 0)
1733 /* continue even have timeout */
1734 rc = -ETIMEDOUT;
1735 }
1736
1737 /* Disable the interrupt */
1738 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
1739 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1740 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1741 val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
1742 CSID_PATH_ERROR_FIFO_OVERFLOW);
1743 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1744 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1745 } else {
1746 id = res->res_id;
1747 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1748 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1749 val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
1750 CSID_PATH_ERROR_FIFO_OVERFLOW);
1751 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1752 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1753 }
1754 /* set state to init HW */
1755 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1756 return rc;
1757}
1758
1759static int cam_ife_csid_get_hw_caps(void *hw_priv,
1760 void *get_hw_cap_args, uint32_t arg_size)
1761{
1762 int rc = 0;
1763 struct cam_ife_csid_hw_caps *hw_caps;
1764 struct cam_ife_csid_hw *csid_hw;
1765 struct cam_hw_info *csid_hw_info;
1766 struct cam_ife_csid_reg_offset *csid_reg;
1767
1768 if (!hw_priv || !get_hw_cap_args) {
1769 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1770 return -EINVAL;
1771 }
1772
1773 csid_hw_info = (struct cam_hw_info *)hw_priv;
1774 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1775 csid_reg = csid_hw->csid_info->csid_reg;
1776 hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
1777
1778 hw_caps->no_rdis = csid_reg->cmn_reg->no_rdis;
1779 hw_caps->no_pix = csid_reg->cmn_reg->no_pix;
1780 hw_caps->major_version = csid_reg->cmn_reg->major_version;
1781 hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
1782 hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
1783
1784 CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
1785 __func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
1786 hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
1787 hw_caps->version_incr);
1788
1789 return rc;
1790}
1791
1792static int cam_ife_csid_reset(void *hw_priv,
1793 void *reset_args, uint32_t arg_size)
1794{
1795 struct cam_ife_csid_hw *csid_hw;
1796 struct cam_hw_info *csid_hw_info;
1797 struct cam_csid_reset_cfg_args *reset;
1798 int rc = 0;
1799
1800 if (!hw_priv || !reset_args || (arg_size !=
1801 sizeof(struct cam_csid_reset_cfg_args))) {
1802 pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
1803 return -EINVAL;
1804 }
1805
1806 csid_hw_info = (struct cam_hw_info *)hw_priv;
1807 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1808 reset = (struct cam_csid_reset_cfg_args *)reset_args;
1809
1810 switch (reset->reset_type) {
1811 case CAM_IFE_CSID_RESET_GLOBAL:
1812 rc = cam_ife_csid_global_reset(csid_hw);
1813 break;
1814 case CAM_IFE_CSID_RESET_PATH:
1815 rc = cam_ife_csid_path_reset(csid_hw, reset);
1816 break;
1817 default:
1818 pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
1819 __LINE__, reset->reset_type);
1820 rc = -EINVAL;
1821 break;
1822 }
1823
1824 return rc;
1825}
1826
1827static int cam_ife_csid_reserve(void *hw_priv,
1828 void *reserve_args, uint32_t arg_size)
1829{
1830 int rc = 0;
1831 struct cam_ife_csid_hw *csid_hw;
1832 struct cam_hw_info *csid_hw_info;
1833 struct cam_csid_hw_reserve_resource_args *reserv;
1834
1835 if (!hw_priv || !reserve_args || (arg_size !=
1836 sizeof(struct cam_csid_hw_reserve_resource_args))) {
1837 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1838 return -EINVAL;
1839 }
1840
1841 csid_hw_info = (struct cam_hw_info *)hw_priv;
1842 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1843 reserv = (struct cam_csid_hw_reserve_resource_args *)reserve_args;
1844
1845 mutex_lock(&csid_hw->hw_info->hw_mutex);
1846 switch (reserv->res_type) {
1847 case CAM_ISP_RESOURCE_CID:
1848 rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
1849 break;
1850 case CAM_ISP_RESOURCE_PIX_PATH:
1851 rc = cam_ife_csid_path_reserve(csid_hw, reserv);
1852 break;
1853 default:
1854 pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
1855 __LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
1856 rc = -EINVAL;
1857 break;
1858 }
1859 mutex_unlock(&csid_hw->hw_info->hw_mutex);
1860 return rc;
1861}
1862
/*
 * Release a previously reserved CID or PIX path resource.
 *
 * For CID resources the per-CID count and the CSI2 reservation count are
 * decremented; the shared CSI2 RX configuration is wiped once the last
 * reservation goes away. PIX path resources simply return to AVAILABLE.
 *
 * Returns 0 on success or -EINVAL for bad arguments/state.
 */
static int cam_ife_csid_release(void *hw_priv,
	void *release_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_hw          *csid_hw;
	struct cam_hw_info              *csid_hw_info;
	struct cam_isp_resource_node    *res;
	struct cam_ife_csid_cid_data    *cid_data;

	if (!hw_priv || !release_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)release_args;

	/* hw_mutex guards the reference counts and res_state transitions */
	mutex_lock(&csid_hw->hw_info->hw_mutex);
	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}

	/* releasing an already-available resource is a benign no-op */
	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CDBG("%s:%d:CSID:%d res type:%d Res %d in released state\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id);
		goto end;
	}

	/* a PIX path must be stopped and deinitialised before release */
	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);

	switch (res->res_type) {
	case CAM_ISP_RESOURCE_CID:
		cid_data = (struct cam_ife_csid_cid_data    *) res->res_priv;
		/* drop one CID reference; free the slot when it hits zero */
		if (cid_data->cnt)
			cid_data->cnt--;

		if (!cid_data->cnt)
			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

		if (csid_hw->csi2_reserve_cnt)
			csid_hw->csi2_reserve_cnt--;

		/* last CSI2 user gone: forget the RX configuration */
		if (!csid_hw->csi2_reserve_cnt)
			memset(&csid_hw->csi2_rx_cfg, 0,
				sizeof(struct cam_ife_csid_csi2_rx_cfg));

		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);

		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		break;
	}

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}
1949
/*
 * Initialise the CSID hardware for a reserved resource: power up the
 * core, then program either the CSI2 RX block (CID resource) or the
 * IPP/RDI path configuration (PIX path resource).
 *
 * On any failure after power-up the core is powered back down.
 * Returns 0 on success or a negative errno.
 */
static int cam_ife_csid_init_hw(void *hw_priv,
	void *init_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_hw                 *csid_hw;
	struct cam_hw_info                     *csid_hw_info;
	struct cam_isp_resource_node           *res;
	struct cam_ife_csid_reg_offset         *csid_reg;

	if (!hw_priv || !init_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)init_args;
	csid_reg = csid_hw->csid_info->csid_reg;

	/* hw_mutex serialises init/deinit against reserve/release */
	mutex_lock(&csid_hw->hw_info->hw_mutex);
	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}


	/* a PIX path may only be initialised from the RESERVED state */
	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
		pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);


	/* Initialize the csid hardware */
	rc = cam_ife_csid_enable_hw(csid_hw);
	if (rc)
		goto end;

	switch (res->res_type) {
	case CAM_ISP_RESOURCE_CID:
		rc = cam_ife_csid_enable_csi2(csid_hw, res);
		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
			rc = cam_ife_csid_init_config_ipp_path(csid_hw, res);
		else
			rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);

		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type);
		break;
	}

	/* undo the power-up if configuration failed */
	if (rc)
		cam_ife_csid_disable_hw(csid_hw);
end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}
2025
2026static int cam_ife_csid_deinit_hw(void *hw_priv,
2027 void *deinit_args, uint32_t arg_size)
2028{
2029 int rc = 0;
2030 struct cam_ife_csid_hw *csid_hw;
2031 struct cam_hw_info *csid_hw_info;
2032 struct cam_isp_resource_node *res;
2033
2034 if (!hw_priv || !deinit_args ||
2035 (arg_size != sizeof(struct cam_isp_resource_node))) {
2036 pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
2037 return -EINVAL;
2038 }
2039
2040 res = (struct cam_isp_resource_node *)deinit_args;
2041 csid_hw_info = (struct cam_hw_info *)hw_priv;
2042 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2043
2044 mutex_lock(&csid_hw->hw_info->hw_mutex);
2045 if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
2046 CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
2047 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2048 res->res_id);
2049 goto end;
2050 }
2051
2052 switch (res->res_type) {
2053 case CAM_ISP_RESOURCE_CID:
2054 rc = cam_ife_csid_disable_csi2(csid_hw, res);
2055 break;
2056 case CAM_ISP_RESOURCE_PIX_PATH:
2057 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2058 rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
2059 else
2060 rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
2061
2062 break;
2063 default:
2064 pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
2065 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2066 res->res_type);
2067 goto end;
2068 }
2069
2070 /* Disable CSID HW */
2071 cam_ife_csid_disable_hw(csid_hw);
2072
2073end:
2074 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2075 return rc;
2076}
2077
2078static int cam_ife_csid_start(void *hw_priv, void *start_args,
2079 uint32_t arg_size)
2080{
2081 int rc = 0;
2082 struct cam_ife_csid_hw *csid_hw;
2083 struct cam_hw_info *csid_hw_info;
2084 struct cam_isp_resource_node *res;
2085 struct cam_ife_csid_reg_offset *csid_reg;
2086
2087 if (!hw_priv || !start_args ||
2088 (arg_size != sizeof(struct cam_isp_resource_node))) {
2089 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
2090 return -EINVAL;
2091 }
2092
2093 csid_hw_info = (struct cam_hw_info *)hw_priv;
2094 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2095 res = (struct cam_isp_resource_node *)start_args;
2096 csid_reg = csid_hw->csid_info->csid_reg;
2097
2098 mutex_lock(&csid_hw->hw_info->hw_mutex);
2099 if ((res->res_type == CAM_ISP_RESOURCE_CID &&
2100 res->res_id >= CAM_IFE_CSID_CID_MAX) ||
2101 (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
2102 res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
2103 CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
2104 __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
2105 res->res_id);
2106 rc = -EINVAL;
2107 goto end;
2108 }
2109
2110 CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
2111 csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
2112
2113 switch (res->res_type) {
2114 case CAM_ISP_RESOURCE_CID:
2115 if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
2116 rc = cam_ife_csid_tpg_start(csid_hw, res);
2117 break;
2118 case CAM_ISP_RESOURCE_PIX_PATH:
2119 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2120 rc = cam_ife_csid_enable_ipp_path(csid_hw, res);
2121 else
2122 rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
2123 break;
2124 default:
2125 pr_err("%s:%d:CSID:%d Invalid res type%d\n",
2126 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2127 res->res_type);
2128 break;
2129 }
2130end:
2131 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2132 return rc;
2133}
2134
2135static int cam_ife_csid_stop(void *hw_priv,
2136 void *stop_args, uint32_t arg_size)
2137{
2138 int rc = 0;
2139 struct cam_ife_csid_hw *csid_hw;
2140 struct cam_hw_info *csid_hw_info;
2141 struct cam_isp_resource_node *res;
2142 struct cam_csid_hw_stop_args *csid_stop;
2143 uint32_t i;
2144
2145 if (!hw_priv || !stop_args ||
2146 (arg_size != sizeof(struct cam_csid_hw_stop_args))) {
2147 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
2148 return -EINVAL;
2149 }
2150 csid_stop = (struct cam_csid_hw_stop_args *) stop_args;
2151 csid_hw_info = (struct cam_hw_info *)hw_priv;
2152 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2153
2154 mutex_lock(&csid_hw->hw_info->hw_mutex);
2155 /* Stop the resource first */
2156 for (i = 0; i < csid_stop->num_res; i++) {
2157 res = csid_stop->node_res[i];
2158 switch (res->res_type) {
2159 case CAM_ISP_RESOURCE_CID:
2160 if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
2161 rc = cam_ife_csid_tpg_stop(csid_hw, res);
2162 break;
2163 case CAM_ISP_RESOURCE_PIX_PATH:
2164 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2165 rc = cam_ife_csid_disable_ipp_path(csid_hw,
2166 res, csid_stop->stop_cmd);
2167 else
2168 rc = cam_ife_csid_disable_rdi_path(csid_hw,
2169 res, csid_stop->stop_cmd);
2170
2171 break;
2172 default:
2173 pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
2174 __LINE__, csid_hw->hw_intf->hw_idx,
2175 res->res_type);
2176 break;
2177 }
2178 }
2179
2180 /*wait for the path to halt */
2181 for (i = 0; i < csid_stop->num_res; i++) {
2182 res = csid_stop->node_res[i];
2183 if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
2184 csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
2185 rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
2186 }
2187
2188 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2189 return rc;
2190
2191}
2192
2193static int cam_ife_csid_read(void *hw_priv,
2194 void *read_args, uint32_t arg_size)
2195{
2196 pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
2197
2198 return -EINVAL;
2199}
2200
2201static int cam_ife_csid_write(void *hw_priv,
2202 void *write_args, uint32_t arg_size)
2203{
2204 pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
2205 return -EINVAL;
2206}
2207
2208static int cam_ife_csid_process_cmd(void *hw_priv,
2209 uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
2210{
2211 int rc = 0;
2212 struct cam_ife_csid_hw *csid_hw;
2213 struct cam_hw_info *csid_hw_info;
2214
2215 if (!hw_priv || !cmd_args) {
2216 pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
2217 return -EINVAL;
2218 }
2219
2220 csid_hw_info = (struct cam_hw_info *)hw_priv;
2221 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2222
2223 mutex_lock(&csid_hw->hw_info->hw_mutex);
2224 switch (cmd_type) {
2225 case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
2226 rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
2227 break;
2228 default:
2229 pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
2230 __LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
2231 rc = -EINVAL;
2232 break;
2233 }
2234 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2235
2236 return rc;
2237
2238}
2239
2240irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
2241{
2242 struct cam_ife_csid_hw *csid_hw;
2243 struct cam_hw_soc_info *soc_info;
2244 struct cam_ife_csid_reg_offset *csid_reg;
2245 uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
2246 irq_status_rdi[4];
2247
2248 csid_hw = (struct cam_ife_csid_hw *)data;
2249
2250 CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
2251 csid_hw->hw_intf->hw_idx);
2252
2253 if (!data) {
2254 pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
2255 return IRQ_HANDLED;
2256 }
2257
2258 csid_reg = csid_hw->csid_info->csid_reg;
2259 soc_info = &csid_hw->hw_info->soc_info;
2260
2261 /* read */
2262 irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2263 csid_reg->cmn_reg->csid_top_irq_status_addr);
2264
2265 irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2266 csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
2267
2268 if (csid_reg->cmn_reg->no_pix)
2269 irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2270 csid_reg->ipp_reg->csid_ipp_irq_status_addr);
2271
2272
2273 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
2274 irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2275 csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
2276
2277 /* clear */
2278 cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
2279 csid_reg->cmn_reg->csid_top_irq_clear_addr);
2280 cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
2281 csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
2282 if (csid_reg->cmn_reg->no_pix)
2283 cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
2284 csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
2285
2286 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
2287 cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
2288 csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
2289 }
2290 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
2291 csid_reg->cmn_reg->csid_irq_cmd_addr);
2292
2293 CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
2294 irq_status_rx);
2295 CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
2296 irq_status_ipp);
2297
2298 if (irq_status_top) {
2299 CDBG("%s:%d: CSID global reset complete......Exit\n",
2300 __func__, __LINE__);
2301 complete(&csid_hw->csid_top_complete);
2302 return IRQ_HANDLED;
2303 }
2304
2305
2306 if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
2307 CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
2308 complete(&csid_hw->csid_csi2_complete);
2309 }
2310
2311 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
2312 pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
2313 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2314 }
2315 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
2316 pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
2317 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2318 }
2319 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
2320 pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
2321 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2322 }
2323 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
2324 pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
2325 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2326 }
2327 if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
2328 pr_err_ratelimited("%s:%d:CSID:%d TG OVER FLOW\n",
2329 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2330 }
2331 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
2332 pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
2333 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2334 }
2335 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
2336 pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
2337 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2338 }
2339 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
2340 pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
2341 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2342 }
2343
2344 /*read the IPP errors */
2345 if (csid_reg->cmn_reg->no_pix) {
2346 /* IPP reset done bit */
2347 if (irq_status_ipp &
2348 BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
2349 CDBG("%s%d: CSID IPP reset complete\n",
2350 __func__, __LINE__);
2351 complete(&csid_hw->csid_ipp_complete);
2352 }
2353 if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
2354 CDBG("%s: CSID IPP SOF received\n", __func__);
2355 if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
2356 CDBG("%s: CSID IPP SOL received\n", __func__);
2357 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
2358 CDBG("%s: CSID IPP EOL received\n", __func__);
2359 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
2360 CDBG("%s: CSID IPP EOF received\n", __func__);
2361
2362 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
2363 complete(&csid_hw->csid_ipp_complete);
2364
2365 if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
2366 pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
2367 __func__, __LINE__,
2368 csid_hw->hw_intf->hw_idx);
2369 /*Stop IPP path immediately */
2370 cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
2371 soc_info->reg_map[0].mem_base +
2372 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
2373 }
2374 }
2375
2376 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
2377 if (irq_status_rdi[i] &
2378 BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
2379 CDBG("%s:%d: CSID rdi%d reset complete\n",
2380 __func__, __LINE__, i);
2381 complete(&csid_hw->csid_rdin_complete[i]);
2382 }
2383
2384 if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
2385 complete(&csid_hw->csid_rdin_complete[i]);
2386
2387 if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
2388 pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
2389 __func__, __LINE__,
2390 csid_hw->hw_intf->hw_idx);
2391 /*Stop RDI path immediately */
2392 cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
2393 soc_info->reg_map[0].mem_base +
2394 csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
2395 }
2396 }
2397
2398 CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
2399 return IRQ_HANDLED;
2400}
2401
2402int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
2403 uint32_t csid_idx)
2404{
2405 int rc = -EINVAL;
2406 uint32_t i;
2407 struct cam_ife_csid_path_cfg *path_data;
2408 struct cam_ife_csid_cid_data *cid_data;
2409 struct cam_hw_info *csid_hw_info;
2410 struct cam_ife_csid_hw *ife_csid_hw = NULL;
2411
2412 if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
2413 pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
2414 csid_idx);
2415 return rc;
2416 }
2417
2418 csid_hw_info = (struct cam_hw_info *) csid_hw_intf->hw_priv;
2419 ife_csid_hw = (struct cam_ife_csid_hw *) csid_hw_info->core_info;
2420
2421 ife_csid_hw->hw_intf = csid_hw_intf;
2422 ife_csid_hw->hw_info = csid_hw_info;
2423
2424 CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
2425 ife_csid_hw->hw_intf->hw_type, csid_idx);
2426
2427
2428 ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
2429 mutex_init(&ife_csid_hw->hw_info->hw_mutex);
2430 spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
2431 init_completion(&ife_csid_hw->hw_info->hw_complete);
2432
2433 init_completion(&ife_csid_hw->csid_top_complete);
2434 init_completion(&ife_csid_hw->csid_csi2_complete);
2435 init_completion(&ife_csid_hw->csid_ipp_complete);
2436 for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
2437 init_completion(&ife_csid_hw->csid_rdin_complete[i]);
2438
2439
2440 rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
2441 cam_ife_csid_irq, ife_csid_hw);
2442 if (rc < 0) {
2443 pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
2444 csid_idx);
2445 goto err;
2446 }
2447
2448 ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
2449 ife_csid_hw->hw_intf->hw_ops.init = cam_ife_csid_init_hw;
2450 ife_csid_hw->hw_intf->hw_ops.deinit = cam_ife_csid_deinit_hw;
2451 ife_csid_hw->hw_intf->hw_ops.reset = cam_ife_csid_reset;
2452 ife_csid_hw->hw_intf->hw_ops.reserve = cam_ife_csid_reserve;
2453 ife_csid_hw->hw_intf->hw_ops.release = cam_ife_csid_release;
2454 ife_csid_hw->hw_intf->hw_ops.start = cam_ife_csid_start;
2455 ife_csid_hw->hw_intf->hw_ops.stop = cam_ife_csid_stop;
2456 ife_csid_hw->hw_intf->hw_ops.read = cam_ife_csid_read;
2457 ife_csid_hw->hw_intf->hw_ops.write = cam_ife_csid_write;
2458 ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
2459
2460 /*Initialize the CID resoure */
2461 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
2462 ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
2463 ife_csid_hw->cid_res[i].res_id = i;
2464 ife_csid_hw->cid_res[i].res_state =
2465 CAM_ISP_RESOURCE_STATE_AVAILABLE;
2466 ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;
2467
2468 cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
2469 GFP_KERNEL);
2470 if (!cid_data) {
2471 rc = -ENOMEM;
2472 goto err;
2473 }
2474 ife_csid_hw->cid_res[i].res_priv = cid_data;
2475 }
2476
2477 /* Initialize the IPP resources */
2478 if (ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix) {
2479 ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
2480 ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
2481 ife_csid_hw->ipp_res.res_state =
2482 CAM_ISP_RESOURCE_STATE_AVAILABLE;
2483 ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
2484 path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
2485 GFP_KERNEL);
2486 if (!path_data) {
2487 rc = -ENOMEM;
2488 goto err;
2489 }
2490 ife_csid_hw->ipp_res.res_priv = path_data;
2491 }
2492
2493 /* Initialize the RDI resource */
2494 for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
2495 i++) {
2496 /* res type is from RDI 0 to RDI3 */
2497 ife_csid_hw->rdi_res[i].res_type =
2498 CAM_ISP_RESOURCE_PIX_PATH;
2499 ife_csid_hw->rdi_res[i].res_id = i;
2500 ife_csid_hw->rdi_res[i].res_state =
2501 CAM_ISP_RESOURCE_STATE_AVAILABLE;
2502 ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;
2503
2504 path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
2505 GFP_KERNEL);
2506 if (!path_data) {
2507 rc = -ENOMEM;
2508 goto err;
2509 }
2510 ife_csid_hw->rdi_res[i].res_priv = path_data;
2511 }
2512
2513 return 0;
2514err:
2515 if (rc) {
2516 kfree(ife_csid_hw->ipp_res.res_priv);
2517 for (i = 0; i <
2518 ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis; i++)
2519 kfree(ife_csid_hw->rdi_res[i].res_priv);
2520
2521 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
2522 kfree(ife_csid_hw->cid_res[i].res_priv);
2523
2524 }
2525
2526 return rc;
2527}
2528
2529
2530int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
2531{
2532 int rc = -EINVAL;
2533 uint32_t i;
2534
2535 if (!ife_csid_hw) {
2536 pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
2537 return rc;
2538 }
2539
2540 /* release the privdate data memory from resources */
2541 kfree(ife_csid_hw->ipp_res.res_priv);
2542 for (i = 0; i <
2543 ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
2544 i++) {
2545 kfree(ife_csid_hw->rdi_res[i].res_priv);
2546 }
2547 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
2548 kfree(ife_csid_hw->cid_res[i].res_priv);
2549
2550
2551 return 0;
2552}
2553
2554