blob: 9f642a317387446808b80cab45657799e3b00065 [file] [log] [blame]
Jing Zhouff57d862017-03-21 00:54:25 -07001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/iopoll.h>
14#include <linux/slab.h>
15#include <uapi/media/cam_isp.h>
16#include <uapi/media/cam_defs.h>
17
18#include "cam_ife_csid_core.h"
19#include "cam_isp_hw.h"
20#include "cam_soc_util.h"
21#include "cam_io_util.h"
22
23#undef CDBG
24#define CDBG(fmt, args...) pr_debug(fmt, ##args)
25
26
27/* Timeout value in msec */
28#define IFE_CSID_TIMEOUT 1000
29
30/* TPG VC/DT values */
31#define CAM_IFE_CSID_TPG_VC_VAL 0xA
32#define CAM_IFE_CSID_TPG_DT_VAL 0x2B
33
34/* Timeout values in usec */
35#define CAM_IFE_CSID_TIMEOUT_SLEEP_US 1000
36#define CAM_IFE_CSID_TIMEOUT_ALL_US 1000000
37
38static int cam_ife_csid_is_ipp_format_supported(
39 uint32_t decode_fmt)
40{
41 int rc = -EINVAL;
42
43 switch (decode_fmt) {
44 case CAM_FORMAT_MIPI_RAW_6:
45 case CAM_FORMAT_MIPI_RAW_8:
46 case CAM_FORMAT_MIPI_RAW_10:
47 case CAM_FORMAT_MIPI_RAW_12:
48 case CAM_FORMAT_MIPI_RAW_14:
49 case CAM_FORMAT_MIPI_RAW_16:
50 case CAM_FORMAT_MIPI_RAW_20:
51 case CAM_FORMAT_DPCM_10_6_10:
52 case CAM_FORMAT_DPCM_10_8_10:
53 case CAM_FORMAT_DPCM_12_6_12:
54 case CAM_FORMAT_DPCM_12_8_12:
55 case CAM_FORMAT_DPCM_14_8_14:
56 case CAM_FORMAT_DPCM_14_10_14:
57 rc = 0;
58 break;
59 default:
60 break;
61 }
62 return rc;
63}
64
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +053065static int cam_ife_csid_get_format(uint32_t input_fmt,
66 uint32_t *path_fmt)
Jing Zhouff57d862017-03-21 00:54:25 -070067{
68 int rc = 0;
69
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +053070 switch (input_fmt) {
Jing Zhouff57d862017-03-21 00:54:25 -070071 case CAM_FORMAT_MIPI_RAW_6:
72 *path_fmt = 0;
Jing Zhouff57d862017-03-21 00:54:25 -070073 break;
74 case CAM_FORMAT_MIPI_RAW_8:
75 *path_fmt = 1;
Jing Zhouff57d862017-03-21 00:54:25 -070076 break;
77 case CAM_FORMAT_MIPI_RAW_10:
78 *path_fmt = 2;
Jing Zhouff57d862017-03-21 00:54:25 -070079 break;
80 case CAM_FORMAT_MIPI_RAW_12:
81 *path_fmt = 3;
Jing Zhouff57d862017-03-21 00:54:25 -070082 break;
83 case CAM_FORMAT_MIPI_RAW_14:
84 *path_fmt = 4;
Jing Zhouff57d862017-03-21 00:54:25 -070085 break;
86 case CAM_FORMAT_MIPI_RAW_16:
87 *path_fmt = 5;
Jing Zhouff57d862017-03-21 00:54:25 -070088 break;
89 case CAM_FORMAT_MIPI_RAW_20:
90 *path_fmt = 6;
Jing Zhouff57d862017-03-21 00:54:25 -070091 break;
92 case CAM_FORMAT_DPCM_10_6_10:
93 *path_fmt = 7;
Jing Zhouff57d862017-03-21 00:54:25 -070094 break;
95 case CAM_FORMAT_DPCM_10_8_10:
96 *path_fmt = 8;
Jing Zhouff57d862017-03-21 00:54:25 -070097 break;
98 case CAM_FORMAT_DPCM_12_6_12:
99 *path_fmt = 9;
Jing Zhouff57d862017-03-21 00:54:25 -0700100 break;
101 case CAM_FORMAT_DPCM_12_8_12:
102 *path_fmt = 0xA;
Jing Zhouff57d862017-03-21 00:54:25 -0700103 break;
104 case CAM_FORMAT_DPCM_14_8_14:
105 *path_fmt = 0xB;
Jing Zhouff57d862017-03-21 00:54:25 -0700106 break;
107 case CAM_FORMAT_DPCM_14_10_14:
108 *path_fmt = 0xC;
Jing Zhouff57d862017-03-21 00:54:25 -0700109 break;
110 default:
111 pr_err("%s:%d:CSID:%d un supported format\n",
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +0530112 __func__, __LINE__, input_fmt);
Jing Zhouff57d862017-03-21 00:54:25 -0700113 rc = -EINVAL;
114 }
115
116 return rc;
117}
118
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +0530119static int cam_ife_csid_get_rdi_format(uint32_t input_fmt,
120 uint32_t output_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
121{
122 int rc = 0;
123
124 CDBG("%s:%d:input format:%d output format:%d\n",
125 __func__, __LINE__, input_fmt, output_fmt);
126
127 switch (output_fmt) {
128 case CAM_FORMAT_MIPI_RAW_6:
129 case CAM_FORMAT_MIPI_RAW_8:
130 case CAM_FORMAT_MIPI_RAW_10:
131 case CAM_FORMAT_MIPI_RAW_12:
132 case CAM_FORMAT_MIPI_RAW_14:
133 case CAM_FORMAT_MIPI_RAW_16:
134 case CAM_FORMAT_MIPI_RAW_20:
135 case CAM_FORMAT_DPCM_10_6_10:
136 case CAM_FORMAT_DPCM_10_8_10:
137 case CAM_FORMAT_DPCM_12_6_12:
138 case CAM_FORMAT_DPCM_12_8_12:
139 case CAM_FORMAT_DPCM_14_8_14:
140 case CAM_FORMAT_DPCM_14_10_14:
141 *path_fmt = 0xF;
142 *plain_fmt = 0;
143 break;
144
145 case CAM_FORMAT_PLAIN8:
146 rc = cam_ife_csid_get_format(input_fmt, path_fmt);
147 if (rc)
148 goto error;
149
150 *plain_fmt = 0;
151 break;
152 case CAM_FORMAT_PLAIN16_8:
153 case CAM_FORMAT_PLAIN16_10:
154 case CAM_FORMAT_PLAIN16_12:
155 case CAM_FORMAT_PLAIN16_14:
156 case CAM_FORMAT_PLAIN16_16:
157 rc = cam_ife_csid_get_format(input_fmt, path_fmt);
158 if (rc)
159 goto error;
160
161 *plain_fmt = 1;
162 break;
163 case CAM_FORMAT_PLAIN32_20:
164 rc = cam_ife_csid_get_format(input_fmt, path_fmt);
165 if (rc)
166 goto error;
167
168 *plain_fmt = 2;
169 break;
170 default:
171 *path_fmt = 0xF;
172 *plain_fmt = 0;
173 break;
174 }
175
176 CDBG("%s:%d:path format value:%d plain format value:%d\n",
177 __func__, __LINE__, *path_fmt, *plain_fmt);
178
179 return 0;
180error:
181 return rc;
182
183}
184
185
Jing Zhouff57d862017-03-21 00:54:25 -0700186static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
187 struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
188 uint32_t res_type)
189{
190 int rc = 0;
191 struct cam_ife_csid_cid_data *cid_data;
192 uint32_t i = 0, j = 0;
193
194 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
195 if (csid_hw->cid_res[i].res_state >=
196 CAM_ISP_RESOURCE_STATE_RESERVED) {
197 cid_data = (struct cam_ife_csid_cid_data *)
198 csid_hw->cid_res[i].res_priv;
199 if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
200 if (cid_data->tpg_set) {
201 cid_data->cnt++;
202 *res = &csid_hw->cid_res[i];
203 break;
204 }
205 } else {
206 if (cid_data->vc == vc && cid_data->dt == dt) {
207 cid_data->cnt++;
208 *res = &csid_hw->cid_res[i];
209 break;
210 }
211 }
212 }
213 }
214
215 if (i == CAM_IFE_CSID_CID_RES_MAX) {
216 if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
217 pr_err("%s:%d:CSID:%d TPG CID not available\n",
218 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
219 rc = -EINVAL;
220 }
221
222 for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
223 if (csid_hw->cid_res[j].res_state ==
224 CAM_ISP_RESOURCE_STATE_AVAILABLE) {
225 cid_data = (struct cam_ife_csid_cid_data *)
226 csid_hw->cid_res[j].res_priv;
227 cid_data->vc = vc;
228 cid_data->dt = dt;
229 cid_data->cnt = 1;
230 csid_hw->cid_res[j].res_state =
231 CAM_ISP_RESOURCE_STATE_RESERVED;
232 *res = &csid_hw->cid_res[j];
233 CDBG("%s:%d:CSID:%d CID %d allocated\n",
234 __func__, __LINE__,
235 csid_hw->hw_intf->hw_idx,
236 csid_hw->cid_res[j].res_id);
237 break;
238 }
239 }
240
241 if (j == CAM_IFE_CSID_CID_RES_MAX) {
242 pr_err("%s:%d:CSID:%d Free cid is not available\n",
243 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
244 rc = -EINVAL;
245 }
246 }
247
248 return rc;
249}
250
251
/*
 * cam_ife_csid_global_reset() - perform a top-level reset of the CSID HW.
 *
 * Sequence: save the rx/IPP/RDI IRQ mask registers, mask and clear all
 * interrupts, re-enable format measure on IPP/RDI, kick the top reset
 * strobe, wait for the reset-done interrupt (signalled on
 * csid_top_complete by the IRQ handler), then restore the saved masks.
 *
 * Return: 0 on success, -EINVAL if the HW is not powered up, -ETIMEDOUT
 * if the reset-done interrupt does not arrive within IFE_CSID_TIMEOUT.
 */
static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
{
	struct cam_hw_soc_info *soc_info;
	struct cam_ife_csid_reg_offset *csid_reg;
	int rc = 0;
	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = csid_hw->csid_info->csid_reg;

	/* Reset is only legal while the HW is powered up. */
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	init_completion(&csid_hw->csid_top_complete);

	/* Save interrupt mask registers values*/
	/* NOTE(review): assumes cmn_reg->no_rdis <= CAM_IFE_CSID_RDI_MAX
	 * so irq_mask_rdi[] cannot overflow - confirm at probe time.
	 */
	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
	}

	/* Mask all interrupts */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);

	/* clear all interrupts */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);

	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);

	for (i = 0 ; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);

	/* latch the clears via the IRQ command register */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);

	/* enable the IPP and RDI format measure */
	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_cfg0_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);

	/* perform the top CSID HW reset */
	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
		__func__, __LINE__);

	/* wait_for_completion_timeout(): 0 = timeout, >0 = jiffies left */
	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
		if (rc == 0)
			rc = -ETIMEDOUT;
	} else {
		rc = 0;
	}

	/*restore all interrupt masks */
	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);

	return rc;
}
366
/*
 * cam_ife_csid_path_reset() - reset a single CSID path (IPP or one RDI).
 *
 * Enables the path's reset-done interrupt, briefly enables the test
 * generator around the reset strobe, writes the path reset strobe, then
 * waits for the IRQ handler to signal the matching completion.
 *
 * Return: 0 on success, -EINVAL for a bad state/resource, -ETIMEDOUT if
 * the reset-done interrupt does not arrive within IFE_CSID_TIMEOUT.
 */
static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_reset_cfg_args *reset)
{
	int rc = 0;
	struct cam_hw_soc_info *soc_info;
	struct cam_isp_resource_node *res;
	struct cam_ife_csid_reg_offset *csid_reg;
	uint32_t reset_strb_addr, reset_strb_val, val, id;
	struct completion *complete;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	res = reset->node_res;

	/* Path reset is only legal while the HW is powered up. */
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_id);

	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		if (!csid_reg->ipp_reg) {
			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
			return -EINVAL;
		}

		reset_strb_addr = csid_reg->ipp_reg->csid_ipp_rst_strobes_addr;
		complete = &csid_hw->csid_ipp_complete;

		/* Enable path reset done interrupt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
		val |= CSID_PATH_INFO_RST_DONE;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	} else {
		/* Any non-IPP path id indexes an RDI register set. */
		id = res->res_id;
		if (!csid_reg->rdi_reg[id]) {
			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
			return -EINVAL;
		}

		reset_strb_addr =
			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
		complete =
			&csid_hw->csid_rdin_complete[id];

		/* Enable path reset done interrupt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
		val |= CSID_PATH_INFO_RST_DONE;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	}

	init_completion(complete);
	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;

	/* Enable the Test gen before reset */
	/* NOTE(review): tpg_reg is dereferenced without a NULL check,
	 * unlike ipp_reg/rdi_reg above - confirm tpg_reg is always valid.
	 */
	cam_io_w_mb(1, csid_hw->hw_info->soc_info.reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_ctrl_addr);

	/* Reset the corresponding ife csid path */
	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
		reset_strb_addr);

	/* wait_for_completion_timeout(): 0 = timeout, >0 = jiffies left */
	rc = wait_for_completion_timeout(complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, rc);
		if (rc == 0)
			rc = -ETIMEDOUT;
	}

	/* Disable Test Gen after reset*/
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_ctrl_addr);

end:
	return rc;

}
467
468static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
469 struct cam_csid_hw_reserve_resource_args *cid_reserv)
470{
471 int rc = 0;
472 struct cam_ife_csid_cid_data *cid_data;
473
474 CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
475 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
476 cid_reserv->in_port->res_type,
477 cid_reserv->in_port->lane_type,
478 cid_reserv->in_port->lane_num,
479 cid_reserv->in_port->dt,
480 cid_reserv->in_port->vc);
481
482 if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
483 pr_err("%s:%d:CSID:%d Invalid phy sel %d\n", __func__,
484 __LINE__, csid_hw->hw_intf->hw_idx,
485 cid_reserv->in_port->res_type);
486 rc = -EINVAL;
487 goto end;
488 }
489
490 if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
491 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
492 pr_err("%s:%d:CSID:%d Invalid lane type %d\n", __func__,
493 __LINE__, csid_hw->hw_intf->hw_idx,
494 cid_reserv->in_port->lane_type);
495 rc = -EINVAL;
496 goto end;
497 }
498
499 if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY &&
500 cid_reserv->in_port->lane_num > 4) &&
501 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
502 pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
503 __LINE__, csid_hw->hw_intf->hw_idx,
504 cid_reserv->in_port->lane_num);
505 rc = -EINVAL;
506 goto end;
507 }
508 if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
509 cid_reserv->in_port->lane_num > 3) &&
510 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
511 pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
512 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
513 cid_reserv->in_port->lane_type,
514 cid_reserv->in_port->lane_num);
515 rc = -EINVAL;
516 goto end;
517 }
518
519 /* CSID CSI2 v2.0 supports 31 vc */
520 if (cid_reserv->in_port->dt > 0x3f ||
521 cid_reserv->in_port->vc > 0x1f) {
522 pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
523 __LINE__, csid_hw->hw_intf->hw_idx,
524 cid_reserv->in_port->vc, cid_reserv->in_port->dt);
525 rc = -EINVAL;
526 goto end;
527 }
528
529 if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
530 (cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
531 cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
532 pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
533 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
534 cid_reserv->in_port->format);
535 rc = -EINVAL;
536 goto end;
537 }
538
539 if (csid_hw->csi2_reserve_cnt) {
540 /* current configure res type should match requested res type */
541 if (csid_hw->res_type != cid_reserv->in_port->res_type) {
542 rc = -EINVAL;
543 goto end;
544 }
545
546 if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
547 if (csid_hw->csi2_rx_cfg.lane_cfg !=
548 cid_reserv->in_port->lane_cfg ||
549 csid_hw->csi2_rx_cfg.lane_type !=
550 cid_reserv->in_port->lane_type ||
551 csid_hw->csi2_rx_cfg.lane_num !=
552 cid_reserv->in_port->lane_num) {
553 rc = -EINVAL;
554 goto end;
555 }
556 } else {
557 if (csid_hw->tpg_cfg.decode_fmt !=
558 cid_reserv->in_port->format ||
559 csid_hw->tpg_cfg.width !=
560 cid_reserv->in_port->left_width ||
561 csid_hw->tpg_cfg.height !=
562 cid_reserv->in_port->height ||
563 csid_hw->tpg_cfg.test_pattern !=
564 cid_reserv->in_port->test_pattern) {
565 rc = -EINVAL;
566 goto end;
567 }
568 }
569 }
570
571 if (!csid_hw->csi2_reserve_cnt) {
572 csid_hw->res_type = cid_reserv->in_port->res_type;
573 /* Take the first CID resource*/
574 csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
575 cid_data = (struct cam_ife_csid_cid_data *)
576 csid_hw->cid_res[0].res_priv;
577
578 csid_hw->csi2_rx_cfg.lane_cfg =
579 cid_reserv->in_port->lane_cfg;
580 csid_hw->csi2_rx_cfg.lane_type =
581 cid_reserv->in_port->lane_type;
582 csid_hw->csi2_rx_cfg.lane_num =
583 cid_reserv->in_port->lane_num;
584
585 if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
586 csid_hw->csi2_rx_cfg.phy_sel = 0;
587 if (cid_reserv->in_port->format >
588 CAM_FORMAT_MIPI_RAW_16) {
589 pr_err("%s:%d: Wrong TPG format\n", __func__,
590 __LINE__);
591 rc = -EINVAL;
592 goto end;
593 }
594 csid_hw->tpg_cfg.decode_fmt =
595 cid_reserv->in_port->format;
596 csid_hw->tpg_cfg.width =
597 cid_reserv->in_port->left_width;
598 csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
599 csid_hw->tpg_cfg.test_pattern =
600 cid_reserv->in_port->test_pattern;
601 cid_data->tpg_set = 1;
602 } else {
603 csid_hw->csi2_rx_cfg.phy_sel =
604 (cid_reserv->in_port->res_type & 0xFF) - 1;
605 }
606
607 cid_data->vc = cid_reserv->in_port->vc;
608 cid_data->dt = cid_reserv->in_port->dt;
609 cid_data->cnt = 1;
610 cid_reserv->node_res = &csid_hw->cid_res[0];
611 csid_hw->csi2_reserve_cnt++;
612
613 CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
614 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
615 cid_reserv->node_res->res_id);
616 } else {
617 rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
618 cid_reserv->in_port->vc, cid_reserv->in_port->dt,
619 cid_reserv->in_port->res_type);
620 /* if success then increment the reserve count */
621 if (!rc) {
622 if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
623 pr_err("%s:%d:CSID%d reserve cnt reached max\n",
624 __func__, __LINE__,
625 csid_hw->hw_intf->hw_idx);
626 rc = -EINVAL;
627 } else {
628 csid_hw->csi2_reserve_cnt++;
629 CDBG("%s:%d:CSID:%d CID:%d acquired\n",
630 __func__, __LINE__,
631 csid_hw->hw_intf->hw_idx,
632 cid_reserv->node_res->res_id);
633 }
634 }
635 }
636
637end:
638 return rc;
639}
640
641
/*
 * cam_ife_csid_path_reserve() - reserve the IPP or an RDI path resource
 * and populate its per-path configuration (formats, vc/dt, crop window).
 *
 * Return: 0 on success with reserve->node_res set, -EINVAL when the
 * arguments are out of range, the format is unsupported on IPP, or the
 * path is not available.
 */
static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_hw_reserve_resource_args *reserve)
{
	int rc = 0;
	struct cam_ife_csid_path_cfg *path_data;
	struct cam_isp_resource_node *res;

	/* CSID CSI2 v2.0 supports 31 vc */
	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			reserve->in_port->vc, reserve->in_port->dt,
			reserve->sync_mode);
		rc = -EINVAL;
		goto end;
	}

	switch (reserve->res_id) {
	case CAM_IFE_PIX_PATH_RES_IPP:
		if (csid_hw->ipp_res.res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
			CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				csid_hw->ipp_res.res_state);
			rc = -EINVAL;
			goto end;
		}

		/* IPP can only decode a subset of formats; reject others. */
		if (cam_ife_csid_is_ipp_format_supported(
			reserve->in_port->format)) {
			pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
				__func__, __LINE__,
				csid_hw->hw_intf->hw_idx, reserve->res_id,
				reserve->in_port->format);
			rc = -EINVAL;
			goto end;
		}

		/* assign the IPP resource */
		res = &csid_hw->ipp_res;
		CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
			__func__, __LINE__,
			csid_hw->hw_intf->hw_idx, res->res_id);

		break;
	case CAM_IFE_PIX_PATH_RES_RDI_0:
	case CAM_IFE_PIX_PATH_RES_RDI_1:
	case CAM_IFE_PIX_PATH_RES_RDI_2:
	case CAM_IFE_PIX_PATH_RES_RDI_3:
		if (csid_hw->rdi_res[reserve->res_id].res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
			CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				reserve->res_id,
				csid_hw->rdi_res[reserve->res_id].res_state);
			rc = -EINVAL;
			goto end;
		} else {
			res = &csid_hw->rdi_res[reserve->res_id];
			CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
		}

		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
			__func__, __LINE__,
			csid_hw->hw_intf->hw_idx, reserve->res_id);
		rc = -EINVAL;
		goto end;
	}

	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	path_data = (struct cam_ife_csid_path_cfg *)res->res_priv;

	/* store the output format for RDI */
	/* NOTE(review): out_port is dereferenced only for RDI paths -
	 * presumably callers always supply out_port for RDI; confirm.
	 */
	switch (reserve->res_id) {
	case CAM_IFE_PIX_PATH_RES_RDI_0:
	case CAM_IFE_PIX_PATH_RES_RDI_1:
	case CAM_IFE_PIX_PATH_RES_RDI_2:
	case CAM_IFE_PIX_PATH_RES_RDI_3:
		path_data->output_fmt = reserve->out_port->format;
		break;
	default:
		break;
	}

	path_data->cid = reserve->cid;
	path_data->decode_fmt = reserve->in_port->format;
	path_data->master_idx = reserve->master_idx;
	path_data->sync_mode = reserve->sync_mode;
	path_data->height = reserve->in_port->height;
	path_data->start_line = reserve->in_port->line_start;
	/* TPG input always streams on the fixed TPG vc/dt pair. */
	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
	} else {
		path_data->dt = reserve->in_port->dt;
		path_data->vc = reserve->in_port->vc;
	}

	/* Dual-IFE: master crops the left half, slave the right half. */
	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
		path_data->crop_enable = 1;
		path_data->start_pixel = reserve->in_port->left_start;
		path_data->width = reserve->in_port->left_width;
	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
		path_data->crop_enable = 1;
		path_data->start_pixel = reserve->in_port->right_start;
		path_data->width = reserve->in_port->right_width;
	} else
		path_data->crop_enable = 0;

	reserve->node_res = res;

end:
	return rc;
}
761
/*
 * cam_ife_csid_enable_hw() - power up and initialise the CSID HW.
 *
 * Reference counted: only the first caller enables SOC resources,
 * performs the global HW reset, resets the SW registers (polled, since
 * the SW reset also clears the IRQ masks) and clears all interrupts.
 *
 * Return: 0 on success; -EINVAL on refcount overflow; SOC enable errors
 * or -ETIMEDOUT on reset failure (with SOC resources released again).
 */
static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw *csid_hw)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	uint32_t i, status, val;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	/* overflow check before increment */
	if (csid_hw->hw_info->open_count == UINT_MAX) {
		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	/* Increment ref Count */
	csid_hw->hw_info->open_count++;
	if (csid_hw->hw_info->open_count > 1) {
		CDBG("%s:%d: CSID hw has already been enabled\n",
			__func__, __LINE__);
		return rc;
	}

	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	rc = cam_ife_csid_enable_soc_resources(soc_info);
	if (rc) {
		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
			csid_hw->hw_intf->hw_idx);
		goto err;
	}


	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
	/* Enable the top IRQ interrupt */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);

	rc = cam_ife_csid_global_reset(csid_hw);
	if (rc) {
		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
		rc = -ETIMEDOUT;
		goto disable_soc;
	}

	/*
	 * Reset the SW registers
	 * SW register reset also reset the mask irq, so poll the irq status
	 * to check the reset complete.
	 */
	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	/* Poll bit0 of the top IRQ status for SW-reset done. */
	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_status_addr,
		status, (status & 0x1) == 0x1,
		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
	if (rc < 0) {
		pr_err("%s:%d: software register reset timeout.....\n",
			__func__, __LINE__);
		rc = -ETIMEDOUT;
		goto disable_soc;
	}

	/* clear all interrupts */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);

	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);

	/* latch the clears via the IRQ command register */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	/* Enable the top IRQ interrupt */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);

	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_hw_version_addr);
	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, val);

	return 0;

disable_soc:
	cam_ife_csid_disable_soc_resources(soc_info);
	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
err:
	csid_hw->hw_info->open_count--;
	return rc;
}
876
877static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
878{
879 int rc = 0;
880 struct cam_hw_soc_info *soc_info;
881 struct cam_ife_csid_reg_offset *csid_reg;
882
883
884 /* Decrement ref Count */
885 if (csid_hw->hw_info->open_count)
886 csid_hw->hw_info->open_count--;
887 if (csid_hw->hw_info->open_count)
888 return rc;
889
890 soc_info = &csid_hw->hw_info->soc_info;
891 csid_reg = csid_hw->csid_info->csid_reg;
892
893 CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
894 csid_hw->hw_intf->hw_idx);
895
896 /*disable the top IRQ interrupt */
897 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
898 csid_reg->cmn_reg->csid_top_irq_mask_addr);
899
900 rc = cam_ife_csid_disable_soc_resources(soc_info);
901 if (rc)
902 pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
903 __LINE__, csid_hw->hw_intf->hw_idx);
904
905 csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
906 return rc;
907}
908
909
910static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw *csid_hw,
911 struct cam_isp_resource_node *res)
912{
913 uint32_t val = 0;
914 struct cam_hw_soc_info *soc_info;
915
916 csid_hw->tpg_start_cnt++;
917 if (csid_hw->tpg_start_cnt == 1) {
918 /*Enable the TPG */
919 CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
920 __LINE__, csid_hw->hw_intf->hw_idx);
921
922 soc_info = &csid_hw->hw_info->soc_info;
923 {
924 uint32_t val;
925 uint32_t i;
926 uint32_t base = 0x600;
927
928 CDBG("%s:%d: ================== TPG ===============\n",
929 __func__, __LINE__);
930 for (i = 0; i < 16; i++) {
931 val = cam_io_r_mb(
932 soc_info->reg_map[0].mem_base +
933 base + i * 4);
934 CDBG("%s:%d reg 0x%x = 0x%x\n",
935 __func__, __LINE__,
936 (base + i*4), val);
937 }
938
939 CDBG("%s:%d: ================== IPP ===============\n",
940 __func__, __LINE__);
941 base = 0x200;
942 for (i = 0; i < 10; i++) {
943 val = cam_io_r_mb(
944 soc_info->reg_map[0].mem_base +
945 base + i * 4);
946 CDBG("%s:%d reg 0x%x = 0x%x\n",
947 __func__, __LINE__,
948 (base + i*4), val);
949 }
950
951 CDBG("%s:%d: ================== RX ===============\n",
952 __func__, __LINE__);
953 base = 0x100;
954 for (i = 0; i < 5; i++) {
955 val = cam_io_r_mb(
956 soc_info->reg_map[0].mem_base +
957 base + i * 4);
958 CDBG("%s:%d reg 0x%x = 0x%x\n",
959 __func__, __LINE__,
960 (base + i*4), val);
961 }
962 }
963
964 CDBG("%s:%d: =============== TPG control ===============\n",
965 __func__, __LINE__);
966 val = (4 << 20);
967 val |= (0x80 << 8);
968 val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
969 val |= 7;
970 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
971 csid_hw->csid_info->csid_reg->tpg_reg->
972 csid_tpg_ctrl_addr);
973
974 val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
975 CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
976 0x600, val);
977 }
978
979 return 0;
980}
981
982static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw *csid_hw,
983 struct cam_isp_resource_node *res)
984{
985 struct cam_hw_soc_info *soc_info;
986
987 if (csid_hw->tpg_start_cnt)
988 csid_hw->tpg_start_cnt--;
989
990 if (csid_hw->tpg_start_cnt)
991 return 0;
992
993 soc_info = &csid_hw->hw_info->soc_info;
994
995 /* disable the TPG */
996 if (!csid_hw->tpg_start_cnt) {
997 CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
998 __LINE__, csid_hw->hw_intf->hw_idx);
999
1000 /*stop the TPG */
1001 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1002 csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
1003 }
1004
1005 return 0;
1006}
1007
1008
/*
 * cam_ife_csid_config_tpg() - program the test pattern generator with the
 * configuration latched in csid_hw->tpg_cfg (width, height, decode
 * format, test pattern).
 *
 * Return: always 0.
 */
static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	uint32_t val = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	CDBG("%s:%d CSID:%d TPG config\n", __func__,
		__LINE__, csid_hw->hw_intf->hw_idx);

	/* configure one DT, infinite frames */
	/* vc_cfg0: [16]=num DT-1 (0 -> one DT), [10]=infinite frames,
	 * low bits = virtual channel.
	 */
	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);

	/* vertical blanking count = 0x740, horzontal blanking count = 0x740*/
	val = (0x740 << 12) | 0x740;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);

	/* arbitrary LFSR seed for pseudo-random pattern generation */
	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);

	/* frame dimensions: width in the high half-word, height low */
	val = csid_hw->tpg_cfg.width << 16 |
		csid_hw->tpg_cfg.height;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);

	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);

	/*
	 * decode_fmt is the same as the input resource format.
	 * it is one larger than the register spec format.
	 */
	val = ((csid_hw->tpg_cfg.decode_fmt - 1) << 16) | 0x8;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);

	/* static frame with split color bar */
	val = 1 << 5;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
	/* config pix pattern */
	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
		soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);

	return 0;
}
1062
/*
 * cam_ife_csid_enable_csi2() - reference-counted enable of the CSI2
 * receiver block.
 *
 * @csid_hw: CSID hardware instance.
 * @res:     CID resource being streamed; res->res_priv carries the
 *           struct cam_ife_csid_cid_data with the virtual channel.
 *
 * Only the first caller programs the RX hardware (cfg0/cfg1, optional
 * TPG, interrupt mask); later callers only bump csi2_cfg_cnt. The
 * resource is moved to STREAMING state.
 *
 * Returns 0 on success, -EINVAL on refcount overflow, or the error
 * from cam_ife_csid_config_tpg() (in which case the resource is put
 * back to RESERVED).
 */
static int cam_ife_csid_enable_csi2(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset  *csid_reg;
	struct cam_hw_soc_info          *soc_info;
	struct cam_ife_csid_cid_data    *cid_data;
	uint32_t val = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);

	/* overflow check before increment */
	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
	csid_hw->csi2_cfg_cnt++;
	/* hardware already configured by an earlier caller */
	if (csid_hw->csi2_cfg_cnt > 1)
		return rc;

	/* rx cfg0: lane count (0-based), lane mapping, lane type, PHY select */
	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
		(csid_hw->csi2_rx_cfg.lane_type << 24);
	val |= (csid_hw->csi2_rx_cfg.phy_sel & 0x3) << 20;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);

	/* rx cfg1: enable MISR */
	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
	/* if VC value is more than 3 then set full width of VC */
	if (cid_data->vc > 3)
		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);

	/* enable packet ecc correction (bit 0) */
	val |= 1;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);

	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
		/* input comes from the test pattern generator: program it */
		rc = cam_ife_csid_config_tpg(csid_hw, res);
		if (rc) {
			/* undo the state transition on TPG failure */
			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
			return rc;
		}
	}

	/* Enable the CSI2 rx interrupts: reset-done plus FIFO/CPHY errors */
	val = CSID_CSI2_RX_INFO_RST_DONE |
		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	return 0;
}
1135
1136static int cam_ife_csid_disable_csi2(
1137 struct cam_ife_csid_hw *csid_hw,
1138 struct cam_isp_resource_node *res)
1139{
1140 struct cam_ife_csid_reg_offset *csid_reg;
1141 struct cam_hw_soc_info *soc_info;
1142
1143 if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
1144 pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
1145 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1146 return -EINVAL;
1147 }
1148
1149 csid_reg = csid_hw->csid_info->csid_reg;
1150 soc_info = &csid_hw->hw_info->soc_info;
1151 CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
1152 __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
1153
1154 if (csid_hw->csi2_cfg_cnt)
1155 csid_hw->csi2_cfg_cnt--;
1156
1157 if (csid_hw->csi2_cfg_cnt)
1158 return 0;
1159
1160 /*Disable the CSI2 rx inerrupts */
1161 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1162 csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
1163
1164 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1165
1166 return 0;
1167}
1168
1169static int cam_ife_csid_init_config_ipp_path(
1170 struct cam_ife_csid_hw *csid_hw,
1171 struct cam_isp_resource_node *res)
1172{
1173 int rc = 0;
1174 struct cam_ife_csid_path_cfg *path_data;
1175 struct cam_ife_csid_reg_offset *csid_reg;
1176 struct cam_hw_soc_info *soc_info;
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +05301177 uint32_t path_format = 0, val = 0;
Jing Zhouff57d862017-03-21 00:54:25 -07001178
1179 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1180 csid_reg = csid_hw->csid_info->csid_reg;
1181 soc_info = &csid_hw->hw_info->soc_info;
1182
1183 if (!csid_reg->ipp_reg) {
1184 pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
1185 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1186 res->res_id);
1187 return -EINVAL;
1188 }
1189
1190 CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +05301191 rc = cam_ife_csid_get_format(path_data->decode_fmt, &path_format);
Jing Zhouff57d862017-03-21 00:54:25 -07001192 if (rc)
1193 return rc;
1194
Jing Zhoubb536a82017-05-18 15:20:38 -07001195 /*
Jing Zhouff57d862017-03-21 00:54:25 -07001196 * configure the IPP and enable the time stamp capture.
1197 * enable the HW measrurement blocks
1198 */
1199 val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
1200 (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
1201 (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
1202 (path_format << csid_reg->cmn_reg->fmt_shift_val) |
1203 (path_data->crop_enable & 1 <<
1204 csid_reg->cmn_reg->crop_h_en_shift_val) |
1205 (path_data->crop_enable & 1 <<
1206 csid_reg->cmn_reg->crop_v_en_shift_val) |
1207 (1 << 1) | 1;
1208 val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
1209 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1210 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1211
Jing Zhoudedc4762017-06-19 17:45:36 +05301212 /* select the post irq sub sample strobe for time stamp capture */
1213 cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
1214 csid_reg->ipp_reg->csid_ipp_cfg1_addr);
1215
Jing Zhouff57d862017-03-21 00:54:25 -07001216 if (path_data->crop_enable) {
1217 val = ((path_data->width +
1218 path_data->start_pixel) & 0xFFFF <<
1219 csid_reg->cmn_reg->crop_shift) |
1220 (path_data->start_pixel & 0xFFFF);
1221
1222 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1223 csid_reg->ipp_reg->csid_ipp_hcrop_addr);
1224
1225 val = ((path_data->height +
1226 path_data->start_line) & 0xFFFF <<
1227 csid_reg->cmn_reg->crop_shift) |
1228 (path_data->start_line & 0xFFFF);
1229
1230 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1231 csid_reg->ipp_reg->csid_ipp_vcrop_addr);
1232 }
1233
1234 /* set frame drop pattern to 0 and period to 1 */
1235 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1236 csid_reg->ipp_reg->csid_ipp_frm_drop_period_addr);
1237 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1238 csid_reg->ipp_reg->csid_ipp_frm_drop_pattern_addr);
1239 /* set irq sub sample pattern to 0 and period to 1 */
1240 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1241 csid_reg->ipp_reg->csid_ipp_irq_subsample_period_addr);
1242 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1243 csid_reg->ipp_reg->csid_ipp_irq_subsample_pattern_addr);
1244 /* set pixel drop pattern to 0 and period to 1 */
1245 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1246 csid_reg->ipp_reg->csid_ipp_pix_drop_pattern_addr);
1247 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1248 csid_reg->ipp_reg->csid_ipp_pix_drop_period_addr);
1249 /* set line drop pattern to 0 and period to 1 */
1250 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1251 csid_reg->ipp_reg->csid_ipp_line_drop_pattern_addr);
1252 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1253 csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
1254
1255 /*Set master or slave IPP */
1256 if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
1257 /*Set halt mode as master */
1258 val = CSID_HALT_MODE_MASTER << 2;
1259 else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
1260 /*Set halt mode as slave and set master idx */
1261 val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
1262 else
1263 /* Default is internal halt mode */
1264 val = 0;
1265
1266 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1267 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1268
1269 /* Enable the IPP path */
1270 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1271 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1272 val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
1273 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1274 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1275
1276 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1277
1278 return rc;
1279}
1280
1281static int cam_ife_csid_deinit_ipp_path(
1282 struct cam_ife_csid_hw *csid_hw,
1283 struct cam_isp_resource_node *res)
1284{
1285 int rc = 0;
1286 struct cam_ife_csid_reg_offset *csid_reg;
1287 struct cam_hw_soc_info *soc_info;
1288 uint32_t val = 0;
1289
1290 csid_reg = csid_hw->csid_info->csid_reg;
1291 soc_info = &csid_hw->hw_info->soc_info;
1292
1293 if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
1294 pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
1295 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1296 res->res_type, res->res_id, res->res_state);
1297 rc = -EINVAL;
1298 }
1299
1300 if (!csid_reg->ipp_reg) {
1301 pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
1302 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1303 res->res_id);
1304 rc = -EINVAL;
1305 }
1306
1307 /* Disable the IPP path */
1308 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1309 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1310 val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
1311 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1312 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1313
1314 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1315 return rc;
1316}
1317
/*
 * cam_ife_csid_enable_ipp_path() - start streaming on the IPP path.
 *
 * @csid_hw: CSID hardware instance.
 * @res:     IPP path resource, expected in INIT_HW state.
 *
 * Issues the resume-at-frame-boundary command (master writes it
 * read-modify-write into the ctrl register, a standalone path writes
 * it directly; a slave is resumed by its master), unmasks the
 * reset-done and FIFO-overflow interrupts, and moves the resource to
 * STREAMING state.
 *
 * Returns 0 on success, -EINVAL for a wrong state or missing IPP block.
 */
static int cam_ife_csid_enable_ipp_path(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	struct cam_ife_csid_reg_offset  *csid_reg;
	struct cam_hw_soc_info          *soc_info;
	struct cam_ife_csid_path_cfg    *path_data;
	uint32_t val = 0;

	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		return -EINVAL;
	}

	if (!csid_reg->ipp_reg) {
		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id);
		return -EINVAL;
	}

	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);

	/* Resume at frame boundary */
	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
		/* master: preserve the halt-mode bits already in ctrl */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	}
	/* for slave mode, not need to resume for slave device */

	/* Enable the required ipp interrupts */
	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return 0;
}
1370
1371static int cam_ife_csid_disable_ipp_path(
1372 struct cam_ife_csid_hw *csid_hw,
1373 struct cam_isp_resource_node *res,
1374 enum cam_ife_csid_halt_cmd stop_cmd)
1375{
1376 int rc = 0;
1377 struct cam_ife_csid_reg_offset *csid_reg;
1378 struct cam_hw_soc_info *soc_info;
1379 struct cam_ife_csid_path_cfg *path_data;
1380 uint32_t val = 0;
1381
1382 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1383 csid_reg = csid_hw->csid_info->csid_reg;
1384 soc_info = &csid_hw->hw_info->soc_info;
1385
1386 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
1387 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1388 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1389 return -EINVAL;
1390 }
1391
1392 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1393 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1394 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1395 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1396 res->res_id, res->res_state);
1397 return rc;
1398 }
1399
1400 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1401 CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
1402 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1403 res->res_state);
1404 return -EINVAL;
1405 }
1406
1407 if (!csid_reg->ipp_reg) {
1408 pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
1409 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1410 return -EINVAL;
1411 }
1412
1413 if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
1414 stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1415 pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
1416 __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
1417 return -EINVAL;
1418 }
1419
1420 CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
1421 csid_hw->hw_intf->hw_idx, res->res_id);
1422
1423 if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
1424 /* configure Halt */
1425 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1426 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1427 val &= ~0x3;
1428 val |= stop_cmd;
1429 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1430 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1431 } else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
1432 cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
1433 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1434
1435 /* For slave mode, halt command should take it from master */
1436
1437 /* Enable the EOF interrupt for resume at boundary case */
1438 if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1439 init_completion(&csid_hw->csid_ipp_complete);
1440 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1441 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1442 val |= CSID_PATH_INFO_INPUT_EOF;
1443 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1444 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1445 } else {
1446 val &= ~(CSID_PATH_INFO_RST_DONE |
1447 CSID_PATH_ERROR_FIFO_OVERFLOW);
1448 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1449 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1450 }
1451
1452 return rc;
1453}
1454
1455
1456static int cam_ife_csid_init_config_rdi_path(
1457 struct cam_ife_csid_hw *csid_hw,
1458 struct cam_isp_resource_node *res)
1459{
1460 int rc = 0;
1461 struct cam_ife_csid_path_cfg *path_data;
1462 struct cam_ife_csid_reg_offset *csid_reg;
1463 struct cam_hw_soc_info *soc_info;
1464 uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
1465
1466 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1467 csid_reg = csid_hw->csid_info->csid_reg;
1468 soc_info = &csid_hw->hw_info->soc_info;
1469
1470 id = res->res_id;
1471 if (!csid_reg->rdi_reg[id]) {
1472 pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
1473 __func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
1474 return -EINVAL;
1475 }
1476
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +05301477 rc = cam_ife_csid_get_rdi_format(path_data->decode_fmt,
1478 path_data->output_fmt, &path_format, &plain_fmt);
Jing Zhouff57d862017-03-21 00:54:25 -07001479 if (rc)
1480 return rc;
1481
Jing Zhoubb536a82017-05-18 15:20:38 -07001482 /*
Jing Zhouff57d862017-03-21 00:54:25 -07001483 * RDI path config and enable the time stamp capture
1484 * Enable the measurement blocks
1485 */
1486 val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
1487 (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
1488 (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
1489 (path_format << csid_reg->cmn_reg->fmt_shift_val) |
1490 (plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
1491 (path_data->crop_enable & 1 <<
1492 csid_reg->cmn_reg->crop_h_en_shift_val) |
1493 (path_data->crop_enable & 1 <<
1494 csid_reg->cmn_reg->crop_v_en_shift_val) |
1495 (1 << 2) | 3;
1496
1497 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1498 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1499
Jing Zhoudedc4762017-06-19 17:45:36 +05301500 /* select the post irq sub sample strobe for time stamp capture */
1501 cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
1502 csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
1503
Jing Zhouff57d862017-03-21 00:54:25 -07001504 if (path_data->crop_enable) {
1505 val = ((path_data->width +
1506 path_data->start_pixel) & 0xFFFF <<
1507 csid_reg->cmn_reg->crop_shift) |
1508 (path_data->start_pixel & 0xFFFF);
1509
1510 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1511 csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
1512
1513 val = ((path_data->height +
1514 path_data->start_line) & 0xFFFF <<
1515 csid_reg->cmn_reg->crop_shift) |
1516 (path_data->start_line & 0xFFFF);
1517
1518 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1519 csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
1520 }
1521 /* set frame drop pattern to 0 and period to 1 */
1522 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1523 csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
1524 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1525 csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
1526 /* set IRQ sum sabmple */
1527 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1528 csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
1529 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1530 csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
1531
1532 /* set pixel drop pattern to 0 and period to 1 */
1533 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1534 csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
1535 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1536 csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
1537 /* set line drop pattern to 0 and period to 1 */
1538 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1539 csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
1540 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1541 csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
1542
1543 /* Configure the halt mode */
1544 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1545 csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
1546
1547 /* Enable the RPP path */
1548 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1549 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1550 val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
1551 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1552 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1553
1554 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1555
1556 return rc;
1557}
1558
1559static int cam_ife_csid_deinit_rdi_path(
1560 struct cam_ife_csid_hw *csid_hw,
1561 struct cam_isp_resource_node *res)
1562{
1563 int rc = 0;
1564 struct cam_ife_csid_reg_offset *csid_reg;
1565 struct cam_hw_soc_info *soc_info;
1566 uint32_t val = 0, id;
1567
1568 csid_reg = csid_hw->csid_info->csid_reg;
1569 soc_info = &csid_hw->hw_info->soc_info;
1570 id = res->res_id;
1571
1572 if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
1573 res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
1574 !csid_reg->rdi_reg[id]) {
1575 pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
1576 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1577 res->res_state);
1578 return -EINVAL;
1579 }
1580
1581 /* Disable the RDI path */
1582 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1583 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1584 val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
1585 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1586 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1587
1588 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1589 return rc;
1590}
1591
1592static int cam_ife_csid_enable_rdi_path(
1593 struct cam_ife_csid_hw *csid_hw,
1594 struct cam_isp_resource_node *res)
1595{
1596 struct cam_ife_csid_reg_offset *csid_reg;
1597 struct cam_hw_soc_info *soc_info;
1598 uint32_t id, val;
1599
1600 csid_reg = csid_hw->csid_info->csid_reg;
1601 soc_info = &csid_hw->hw_info->soc_info;
1602 id = res->res_id;
1603
1604 if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
1605 res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
1606 !csid_reg->rdi_reg[id]) {
1607 pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
1608 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1609 res->res_type, res->res_id, res->res_state);
1610 return -EINVAL;
1611 }
1612
1613 /*resume at frame boundary */
1614 cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
1615 soc_info->reg_map[0].mem_base +
1616 csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
1617
1618 /* Enable the required RDI interrupts */
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +05301619 val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
Jing Zhouff57d862017-03-21 00:54:25 -07001620 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1621 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1622
1623 res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
1624
1625 return 0;
1626}
1627
1628
1629static int cam_ife_csid_disable_rdi_path(
1630 struct cam_ife_csid_hw *csid_hw,
1631 struct cam_isp_resource_node *res,
1632 enum cam_ife_csid_halt_cmd stop_cmd)
1633{
1634 int rc = 0;
1635 struct cam_ife_csid_reg_offset *csid_reg;
1636 struct cam_hw_soc_info *soc_info;
1637 uint32_t val = 0, id;
1638
1639 csid_reg = csid_hw->csid_info->csid_reg;
1640 soc_info = &csid_hw->hw_info->soc_info;
1641 id = res->res_id;
1642
1643 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
1644 !csid_reg->rdi_reg[res->res_id]) {
1645 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1646 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1647 return -EINVAL;
1648 }
1649
1650 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1651 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1652 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1653 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1654 res->res_id, res->res_state);
1655 return rc;
1656 }
1657
1658 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1659 CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
1660 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1661 res->res_state);
1662 return -EINVAL;
1663 }
1664
1665 if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
1666 stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1667 pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
1668 __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
1669 return -EINVAL;
1670 }
1671
1672
1673 CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
1674 csid_hw->hw_intf->hw_idx, res->res_id);
1675
1676 init_completion(&csid_hw->csid_rdin_complete[id]);
1677
1678 if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1679 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1680 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1681 val |= CSID_PATH_INFO_INPUT_EOF;
1682 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1683 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1684 } else {
1685 val &= ~(CSID_PATH_INFO_RST_DONE |
1686 CSID_PATH_ERROR_FIFO_OVERFLOW);
1687 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1688 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1689 }
1690
1691 /*Halt the RDI path */
1692 cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
1693 csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
1694
1695 return rc;
1696}
1697
/*
 * cam_ife_csid_get_time_stamp() - read the 64-bit SOF timestamp of a
 * path.
 *
 * @csid_hw:  CSID hardware instance, expected powered up.
 * @cmd_args: struct cam_csid_get_time_stamp_args whose node_res picks
 *            the path and whose time_stamp_val receives the result.
 *
 * The timestamp is exposed as two 32-bit registers (curr1 = high word,
 * curr0 = low word); they are read in that order and combined.
 * NOTE(review): the two halves are not read atomically — a SOF landing
 * between the reads could tear the value; confirm whether the HW
 * latches both on the curr1 read.
 *
 * Returns 0 on success, -EINVAL for an invalid resource or a powered
 * down device.
 */
static int cam_ife_csid_get_time_stamp(
		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
{
	struct cam_csid_get_time_stamp_args  *time_stamp;
	struct cam_isp_resource_node         *res;
	struct cam_ife_csid_reg_offset       *csid_reg;
	struct cam_hw_soc_info               *soc_info;
	uint32_t  time_32, id;

	time_stamp = (struct cam_csid_get_time_stamp_args  *)cmd_args;
	res = time_stamp->node_res;
	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		/* high 32 bits first, then the low 32 bits */
		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_timestamp_curr1_sof_addr);
		time_stamp->time_stamp_val = time_32;
		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_timestamp_curr0_sof_addr);
		time_stamp->time_stamp_val |= time_32;
	} else {
		/* any other pix path resource id maps to an RDI instance */
		id = res->res_id;
		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->
			csid_rdi_timestamp_curr1_sof_addr);
		time_stamp->time_stamp_val = time_32;
		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;

		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->
			csid_rdi_timestamp_curr0_sof_addr);
		time_stamp->time_stamp_val |= time_32;
	}

	return 0;
}
/*
 * cam_ife_csid_res_wait_for_halt() - wait for a path halted at frame
 * boundary to actually stop.
 *
 * @csid_hw: CSID hardware instance.
 * @res:     path resource that a frame-boundary halt was issued on.
 *
 * Waits (up to IFE_CSID_TIMEOUT ms) on the completion the halt path
 * armed (csid_ipp_complete for IPP, csid_rdin_complete[id] for RDI),
 * then masks the EOF/reset-done/FIFO-overflow interrupts and moves
 * the resource to INIT_HW state — the interrupt mask is cleared even
 * on timeout, so the hardware is left quiesced either way.
 *
 * Returns 0 or the positive remaining jiffies on success, -ETIMEDOUT
 * on timeout, -EINVAL for an invalid id/state.
 * NOTE(review): wait_for_completion_timeout() returns unsigned long;
 * storing it in the int 'rc' relies on the remaining jiffies fitting —
 * presumably fine for a 1s timeout, but confirm.
 */
static int cam_ife_csid_res_wait_for_halt(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset  *csid_reg;
	struct cam_hw_soc_info          *soc_info;

	struct completion  *complete;
	uint32_t val = 0, id;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
		return -EINVAL;
	}

	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, res->res_state);
		return rc;
	}

	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
			res->res_state);
		return -EINVAL;
	}

	/* pick the completion armed by the matching disable path */
	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
		complete = &csid_hw->csid_ipp_complete;
	else
		complete = &csid_hw->csid_rdin_complete[res->res_id];

	rc = wait_for_completion_timeout(complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, rc);
		if (rc == 0)
			/* continue even have timeout */
			rc = -ETIMEDOUT;
	}

	/* Disable the interrupt */
	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
			CSID_PATH_ERROR_FIFO_OVERFLOW);
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
	} else {
		id = res->res_id;
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
			CSID_PATH_ERROR_FIFO_OVERFLOW);
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	}
	/* set state to init HW */
	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	return rc;
}
1823
1824static int cam_ife_csid_get_hw_caps(void *hw_priv,
1825 void *get_hw_cap_args, uint32_t arg_size)
1826{
1827 int rc = 0;
1828 struct cam_ife_csid_hw_caps *hw_caps;
1829 struct cam_ife_csid_hw *csid_hw;
1830 struct cam_hw_info *csid_hw_info;
1831 struct cam_ife_csid_reg_offset *csid_reg;
1832
1833 if (!hw_priv || !get_hw_cap_args) {
1834 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1835 return -EINVAL;
1836 }
1837
1838 csid_hw_info = (struct cam_hw_info *)hw_priv;
1839 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1840 csid_reg = csid_hw->csid_info->csid_reg;
1841 hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
1842
1843 hw_caps->no_rdis = csid_reg->cmn_reg->no_rdis;
1844 hw_caps->no_pix = csid_reg->cmn_reg->no_pix;
1845 hw_caps->major_version = csid_reg->cmn_reg->major_version;
1846 hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
1847 hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
1848
1849 CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
1850 __func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
1851 hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
1852 hw_caps->version_incr);
1853
1854 return rc;
1855}
1856
1857static int cam_ife_csid_reset(void *hw_priv,
1858 void *reset_args, uint32_t arg_size)
1859{
1860 struct cam_ife_csid_hw *csid_hw;
1861 struct cam_hw_info *csid_hw_info;
1862 struct cam_csid_reset_cfg_args *reset;
1863 int rc = 0;
1864
1865 if (!hw_priv || !reset_args || (arg_size !=
1866 sizeof(struct cam_csid_reset_cfg_args))) {
1867 pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
1868 return -EINVAL;
1869 }
1870
1871 csid_hw_info = (struct cam_hw_info *)hw_priv;
1872 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1873 reset = (struct cam_csid_reset_cfg_args *)reset_args;
1874
1875 switch (reset->reset_type) {
1876 case CAM_IFE_CSID_RESET_GLOBAL:
1877 rc = cam_ife_csid_global_reset(csid_hw);
1878 break;
1879 case CAM_IFE_CSID_RESET_PATH:
1880 rc = cam_ife_csid_path_reset(csid_hw, reset);
1881 break;
1882 default:
1883 pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
1884 __LINE__, reset->reset_type);
1885 rc = -EINVAL;
1886 break;
1887 }
1888
1889 return rc;
1890}
1891
1892static int cam_ife_csid_reserve(void *hw_priv,
1893 void *reserve_args, uint32_t arg_size)
1894{
1895 int rc = 0;
1896 struct cam_ife_csid_hw *csid_hw;
1897 struct cam_hw_info *csid_hw_info;
1898 struct cam_csid_hw_reserve_resource_args *reserv;
1899
1900 if (!hw_priv || !reserve_args || (arg_size !=
1901 sizeof(struct cam_csid_hw_reserve_resource_args))) {
1902 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1903 return -EINVAL;
1904 }
1905
1906 csid_hw_info = (struct cam_hw_info *)hw_priv;
1907 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1908 reserv = (struct cam_csid_hw_reserve_resource_args *)reserve_args;
1909
1910 mutex_lock(&csid_hw->hw_info->hw_mutex);
1911 switch (reserv->res_type) {
1912 case CAM_ISP_RESOURCE_CID:
1913 rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
1914 break;
1915 case CAM_ISP_RESOURCE_PIX_PATH:
1916 rc = cam_ife_csid_path_reserve(csid_hw, reserv);
1917 break;
1918 default:
1919 pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
1920 __LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
1921 rc = -EINVAL;
1922 break;
1923 }
1924 mutex_unlock(&csid_hw->hw_info->hw_mutex);
1925 return rc;
1926}
1927
/*
 * cam_ife_csid_release() - HW ops "release" entry point.
 *
 * Returns a previously reserved CID or PIX path resource to the
 * AVAILABLE pool. CID resources are reference counted via
 * cid_data->cnt; the cached CSI2 RX configuration is cleared once the
 * last CSI2 reservation (csi2_reserve_cnt) is dropped.
 * Returns 0 on success or -EINVAL on bad arguments/state.
 */
static int cam_ife_csid_release(void *hw_priv,
	void *release_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_hw          *csid_hw;
	struct cam_hw_info              *csid_hw_info;
	struct cam_isp_resource_node    *res;
	struct cam_ife_csid_cid_data    *cid_data;

	if (!hw_priv || !release_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)release_args;

	mutex_lock(&csid_hw->hw_info->hw_mutex);
	/* res_id must be within range for the given resource type */
	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}

	/* releasing an already-free resource is a successful no-op */
	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CDBG("%s:%d:CSID:%d res type:%d Res %d in released state\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id);
		goto end;
	}

	/* a PIX path may only be released from the RESERVED state */
	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);

	switch (res->res_type) {
	case CAM_ISP_RESOURCE_CID:
		/* drop one reference; free the CID once unreferenced */
		cid_data = (struct cam_ife_csid_cid_data *) res->res_priv;
		if (cid_data->cnt)
			cid_data->cnt--;

		if (!cid_data->cnt)
			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

		if (csid_hw->csi2_reserve_cnt)
			csid_hw->csi2_reserve_cnt--;

		/* last CSI2 user gone: reset the cached RX configuration */
		if (!csid_hw->csi2_reserve_cnt)
			memset(&csid_hw->csi2_rx_cfg, 0,
				sizeof(struct cam_ife_csid_csi2_rx_cfg));

		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);

		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		break;
	}

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}
2014
/*
 * cam_ife_csid_init_hw() - HW ops "init" entry point.
 *
 * Powers on the CSID core via cam_ife_csid_enable_hw() and then
 * programs the requested resource: CSI2 RX for a CID resource, or the
 * IPP/RDI path configuration for a PIX path resource. If resource
 * configuration fails after power-up, the hw is disabled again so the
 * power state is rolled back. Serialized under the hw mutex.
 * Returns 0 on success or a negative errno.
 */
static int cam_ife_csid_init_hw(void *hw_priv,
	void *init_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_hw           *csid_hw;
	struct cam_hw_info               *csid_hw_info;
	struct cam_isp_resource_node     *res;
	struct cam_ife_csid_reg_offset   *csid_reg;

	if (!hw_priv || !init_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)init_args;
	csid_reg = csid_hw->csid_info->csid_reg;

	mutex_lock(&csid_hw->hw_info->hw_mutex);
	/* res_id must be within range for the given resource type */
	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}


	/* a PIX path must have been reserved before it can be inited */
	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
		pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);


	/* Initialize (power on) the csid hardware */
	rc = cam_ife_csid_enable_hw(csid_hw);
	if (rc)
		goto end;

	switch (res->res_type) {
	case CAM_ISP_RESOURCE_CID:
		rc = cam_ife_csid_enable_csi2(csid_hw, res);
		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
			rc = cam_ife_csid_init_config_ipp_path(csid_hw, res);
		else
			rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);

		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type);
		break;
	}

	/* roll back the power-up if resource configuration failed */
	if (rc)
		cam_ife_csid_disable_hw(csid_hw);
end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}
2090
2091static int cam_ife_csid_deinit_hw(void *hw_priv,
2092 void *deinit_args, uint32_t arg_size)
2093{
2094 int rc = 0;
2095 struct cam_ife_csid_hw *csid_hw;
2096 struct cam_hw_info *csid_hw_info;
2097 struct cam_isp_resource_node *res;
2098
2099 if (!hw_priv || !deinit_args ||
2100 (arg_size != sizeof(struct cam_isp_resource_node))) {
2101 pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
2102 return -EINVAL;
2103 }
2104
2105 res = (struct cam_isp_resource_node *)deinit_args;
2106 csid_hw_info = (struct cam_hw_info *)hw_priv;
2107 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2108
2109 mutex_lock(&csid_hw->hw_info->hw_mutex);
2110 if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
2111 CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
2112 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2113 res->res_id);
2114 goto end;
2115 }
2116
2117 switch (res->res_type) {
2118 case CAM_ISP_RESOURCE_CID:
2119 rc = cam_ife_csid_disable_csi2(csid_hw, res);
2120 break;
2121 case CAM_ISP_RESOURCE_PIX_PATH:
2122 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2123 rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
2124 else
2125 rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
2126
2127 break;
2128 default:
2129 pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
2130 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2131 res->res_type);
2132 goto end;
2133 }
2134
2135 /* Disable CSID HW */
2136 cam_ife_csid_disable_hw(csid_hw);
2137
2138end:
2139 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2140 return rc;
2141}
2142
2143static int cam_ife_csid_start(void *hw_priv, void *start_args,
2144 uint32_t arg_size)
2145{
2146 int rc = 0;
2147 struct cam_ife_csid_hw *csid_hw;
2148 struct cam_hw_info *csid_hw_info;
2149 struct cam_isp_resource_node *res;
2150 struct cam_ife_csid_reg_offset *csid_reg;
2151
2152 if (!hw_priv || !start_args ||
2153 (arg_size != sizeof(struct cam_isp_resource_node))) {
2154 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
2155 return -EINVAL;
2156 }
2157
2158 csid_hw_info = (struct cam_hw_info *)hw_priv;
2159 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2160 res = (struct cam_isp_resource_node *)start_args;
2161 csid_reg = csid_hw->csid_info->csid_reg;
2162
Jing Zhouff57d862017-03-21 00:54:25 -07002163 if ((res->res_type == CAM_ISP_RESOURCE_CID &&
2164 res->res_id >= CAM_IFE_CSID_CID_MAX) ||
2165 (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
2166 res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
2167 CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
2168 __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
2169 res->res_id);
2170 rc = -EINVAL;
2171 goto end;
2172 }
2173
2174 CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
2175 csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
2176
2177 switch (res->res_type) {
2178 case CAM_ISP_RESOURCE_CID:
2179 if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
2180 rc = cam_ife_csid_tpg_start(csid_hw, res);
2181 break;
2182 case CAM_ISP_RESOURCE_PIX_PATH:
2183 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2184 rc = cam_ife_csid_enable_ipp_path(csid_hw, res);
2185 else
2186 rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
2187 break;
2188 default:
2189 pr_err("%s:%d:CSID:%d Invalid res type%d\n",
2190 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2191 res->res_type);
2192 break;
2193 }
2194end:
Jing Zhouff57d862017-03-21 00:54:25 -07002195 return rc;
2196}
2197
/*
 * cam_ife_csid_stop() - HW ops "stop" entry point.
 *
 * Two-phase stop over the resource list in @stop_args:
 *  1) issue the stop command to every resource (TPG stop for CID when
 *     the input is the TPG, path-disable for PIX paths);
 *  2) for PIX paths stopped with CAM_CSID_HALT_AT_FRAME_BOUNDARY,
 *     wait for the hw to actually halt; otherwise just mark the
 *     resource back to INIT_HW state.
 *
 * NOTE(review): rc is reassigned per-iteration, so a failure in an
 * early resource can be overwritten by a later success — only the
 * last failing/succeeding operation is reported. Confirm whether
 * aggregate error reporting is intended.
 */
static int cam_ife_csid_stop(void *hw_priv,
	void *stop_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_hw         *csid_hw;
	struct cam_hw_info             *csid_hw_info;
	struct cam_isp_resource_node   *res;
	struct cam_csid_hw_stop_args   *csid_stop;
	uint32_t i;

	if (!hw_priv || !stop_args ||
		(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
		return -EINVAL;
	}
	csid_stop = (struct cam_csid_hw_stop_args  *) stop_args;
	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;

	/* Stop the resource first */
	for (i = 0; i < csid_stop->num_res; i++) {
		res = csid_stop->node_res[i];
		switch (res->res_type) {
		case CAM_ISP_RESOURCE_CID:
			if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
				rc = cam_ife_csid_tpg_stop(csid_hw, res);
			break;
		case CAM_ISP_RESOURCE_PIX_PATH:
			if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
				rc = cam_ife_csid_disable_ipp_path(csid_hw,
						res, csid_stop->stop_cmd);
			else
				rc = cam_ife_csid_disable_rdi_path(csid_hw,
						res, csid_stop->stop_cmd);

			break;
		default:
			pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
				__LINE__, csid_hw->hw_intf->hw_idx,
				res->res_type);
			break;
		}
	}

	/*wait for the path to halt */
	for (i = 0; i < csid_stop->num_res; i++) {
		res = csid_stop->node_res[i];
		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
		else
			res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	}

	return rc;

}
2255
2256static int cam_ife_csid_read(void *hw_priv,
2257 void *read_args, uint32_t arg_size)
2258{
2259 pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
2260
2261 return -EINVAL;
2262}
2263
2264static int cam_ife_csid_write(void *hw_priv,
2265 void *write_args, uint32_t arg_size)
2266{
2267 pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
2268 return -EINVAL;
2269}
2270
2271static int cam_ife_csid_process_cmd(void *hw_priv,
2272 uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
2273{
2274 int rc = 0;
2275 struct cam_ife_csid_hw *csid_hw;
2276 struct cam_hw_info *csid_hw_info;
2277
2278 if (!hw_priv || !cmd_args) {
2279 pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
2280 return -EINVAL;
2281 }
2282
2283 csid_hw_info = (struct cam_hw_info *)hw_priv;
2284 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2285
Jing Zhouff57d862017-03-21 00:54:25 -07002286 switch (cmd_type) {
2287 case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
2288 rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
2289 break;
2290 default:
2291 pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
2292 __LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
2293 rc = -EINVAL;
2294 break;
2295 }
Jing Zhouff57d862017-03-21 00:54:25 -07002296
2297 return rc;
2298
2299}
2300
2301irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
2302{
2303 struct cam_ife_csid_hw *csid_hw;
2304 struct cam_hw_soc_info *soc_info;
2305 struct cam_ife_csid_reg_offset *csid_reg;
2306 uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
2307 irq_status_rdi[4];
2308
2309 csid_hw = (struct cam_ife_csid_hw *)data;
2310
2311 CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
2312 csid_hw->hw_intf->hw_idx);
2313
2314 if (!data) {
2315 pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
2316 return IRQ_HANDLED;
2317 }
2318
2319 csid_reg = csid_hw->csid_info->csid_reg;
2320 soc_info = &csid_hw->hw_info->soc_info;
2321
2322 /* read */
2323 irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2324 csid_reg->cmn_reg->csid_top_irq_status_addr);
2325
2326 irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2327 csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
2328
2329 if (csid_reg->cmn_reg->no_pix)
2330 irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2331 csid_reg->ipp_reg->csid_ipp_irq_status_addr);
2332
2333
2334 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
2335 irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2336 csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
2337
2338 /* clear */
2339 cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
2340 csid_reg->cmn_reg->csid_top_irq_clear_addr);
2341 cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
2342 csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
2343 if (csid_reg->cmn_reg->no_pix)
2344 cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
2345 csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
2346
2347 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
2348 cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
2349 csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
2350 }
2351 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
2352 csid_reg->cmn_reg->csid_irq_cmd_addr);
2353
2354 CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
2355 irq_status_rx);
2356 CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
2357 irq_status_ipp);
2358
2359 if (irq_status_top) {
2360 CDBG("%s:%d: CSID global reset complete......Exit\n",
2361 __func__, __LINE__);
2362 complete(&csid_hw->csid_top_complete);
2363 return IRQ_HANDLED;
2364 }
2365
2366
2367 if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
2368 CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
2369 complete(&csid_hw->csid_csi2_complete);
2370 }
2371
2372 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
2373 pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
2374 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2375 }
2376 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
2377 pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
2378 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2379 }
2380 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
2381 pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
2382 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2383 }
2384 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
2385 pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
2386 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2387 }
2388 if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
2389 pr_err_ratelimited("%s:%d:CSID:%d TG OVER FLOW\n",
2390 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2391 }
2392 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
2393 pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
2394 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2395 }
2396 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
2397 pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
2398 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2399 }
2400 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
2401 pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
2402 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2403 }
2404
2405 /*read the IPP errors */
2406 if (csid_reg->cmn_reg->no_pix) {
2407 /* IPP reset done bit */
2408 if (irq_status_ipp &
2409 BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
2410 CDBG("%s%d: CSID IPP reset complete\n",
2411 __func__, __LINE__);
2412 complete(&csid_hw->csid_ipp_complete);
2413 }
2414 if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
2415 CDBG("%s: CSID IPP SOF received\n", __func__);
2416 if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
2417 CDBG("%s: CSID IPP SOL received\n", __func__);
2418 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
2419 CDBG("%s: CSID IPP EOL received\n", __func__);
2420 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
2421 CDBG("%s: CSID IPP EOF received\n", __func__);
2422
2423 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
2424 complete(&csid_hw->csid_ipp_complete);
2425
2426 if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
2427 pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
2428 __func__, __LINE__,
2429 csid_hw->hw_intf->hw_idx);
2430 /*Stop IPP path immediately */
2431 cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
2432 soc_info->reg_map[0].mem_base +
2433 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
2434 }
2435 }
2436
2437 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
2438 if (irq_status_rdi[i] &
2439 BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
2440 CDBG("%s:%d: CSID rdi%d reset complete\n",
2441 __func__, __LINE__, i);
2442 complete(&csid_hw->csid_rdin_complete[i]);
2443 }
2444
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +05302445 if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF)
2446 CDBG("%s: CSID RDI SOF received\n", __func__);
2447 if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
2448 CDBG("%s: CSID RDI EOF received\n", __func__);
2449
Jing Zhouff57d862017-03-21 00:54:25 -07002450 if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
2451 complete(&csid_hw->csid_rdin_complete[i]);
2452
2453 if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
2454 pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
2455 __func__, __LINE__,
2456 csid_hw->hw_intf->hw_idx);
2457 /*Stop RDI path immediately */
2458 cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
2459 soc_info->reg_map[0].mem_base +
2460 csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
2461 }
2462 }
2463
2464 CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
2465 return IRQ_HANDLED;
2466}
2467
/*
 * cam_ife_csid_hw_probe_init() - one-time probe-time init of a CSID
 * hw instance.
 *
 * Links the hw_intf/hw_info back-pointers, initializes the mutex,
 * spinlock and the completions the IRQ handler signals, registers the
 * IRQ through the SOC layer, fills in the hw ops table, and allocates
 * private data for the CID, IPP and RDI resource nodes.
 * Returns 0 on success or a negative errno; on failure all resource
 * private allocations made so far are freed.
 */
int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
	uint32_t csid_idx)
{
	int rc = -EINVAL;
	uint32_t i;
	struct cam_ife_csid_path_cfg    *path_data;
	struct cam_ife_csid_cid_data    *cid_data;
	struct cam_hw_info              *csid_hw_info;
	struct cam_ife_csid_hw          *ife_csid_hw = NULL;

	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
		pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
			csid_idx);
		return rc;
	}

	csid_hw_info = (struct cam_hw_info *) csid_hw_intf->hw_priv;
	ife_csid_hw = (struct cam_ife_csid_hw *) csid_hw_info->core_info;

	ife_csid_hw->hw_intf = csid_hw_intf;
	ife_csid_hw->hw_info = csid_hw_info;

	CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
		ife_csid_hw->hw_intf->hw_type, csid_idx);


	ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
	mutex_init(&ife_csid_hw->hw_info->hw_mutex);
	spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
	init_completion(&ife_csid_hw->hw_info->hw_complete);

	/* completions signalled by cam_ife_csid_irq() on reset/EOF */
	init_completion(&ife_csid_hw->csid_top_complete);
	init_completion(&ife_csid_hw->csid_csi2_complete);
	init_completion(&ife_csid_hw->csid_ipp_complete);
	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
		init_completion(&ife_csid_hw->csid_rdin_complete[i]);


	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
			cam_ife_csid_irq, ife_csid_hw);
	if (rc < 0) {
		pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
			csid_idx);
		goto err;
	}

	ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
	ife_csid_hw->hw_intf->hw_ops.init        = cam_ife_csid_init_hw;
	ife_csid_hw->hw_intf->hw_ops.deinit      = cam_ife_csid_deinit_hw;
	ife_csid_hw->hw_intf->hw_ops.reset       = cam_ife_csid_reset;
	ife_csid_hw->hw_intf->hw_ops.reserve     = cam_ife_csid_reserve;
	ife_csid_hw->hw_intf->hw_ops.release     = cam_ife_csid_release;
	ife_csid_hw->hw_intf->hw_ops.start       = cam_ife_csid_start;
	ife_csid_hw->hw_intf->hw_ops.stop        = cam_ife_csid_stop;
	ife_csid_hw->hw_intf->hw_ops.read        = cam_ife_csid_read;
	ife_csid_hw->hw_intf->hw_ops.write       = cam_ife_csid_write;
	ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;

	/* Initialize the CID resources */
	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
		ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
		ife_csid_hw->cid_res[i].res_id = i;
		ife_csid_hw->cid_res[i].res_state  =
			CAM_ISP_RESOURCE_STATE_AVAILABLE;
		ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;

		cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
			GFP_KERNEL);
		if (!cid_data) {
			rc = -ENOMEM;
			goto err;
		}
		ife_csid_hw->cid_res[i].res_priv = cid_data;
	}

	/* Initialize the IPP resources */
	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix) {
		ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
		ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
		ife_csid_hw->ipp_res.res_state =
			CAM_ISP_RESOURCE_STATE_AVAILABLE;
		ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
					GFP_KERNEL);
		if (!path_data) {
			rc = -ENOMEM;
			goto err;
		}
		ife_csid_hw->ipp_res.res_priv = path_data;
	}

	/* Initialize the RDI resource */
	for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
				i++) {
		/* res type is from RDI 0 to RDI3 */
		ife_csid_hw->rdi_res[i].res_type =
			CAM_ISP_RESOURCE_PIX_PATH;
		ife_csid_hw->rdi_res[i].res_id = i;
		ife_csid_hw->rdi_res[i].res_state =
			CAM_ISP_RESOURCE_STATE_AVAILABLE;
		ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;

		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
					GFP_KERNEL);
		if (!path_data) {
			rc = -ENOMEM;
			goto err;
		}
		ife_csid_hw->rdi_res[i].res_priv = path_data;
	}

	return 0;
err:
	/* NOTE(review): this frees every res_priv, including entries whose
	 * allocation was never reached; assumes they start out NULL (i.e.
	 * core_info is zero-allocated) so kfree() is a safe no-op —
	 * confirm at the core_info allocation site.
	 */
	if (rc) {
		kfree(ife_csid_hw->ipp_res.res_priv);
		for (i = 0; i <
			ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis; i++)
			kfree(ife_csid_hw->rdi_res[i].res_priv);

		for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
			kfree(ife_csid_hw->cid_res[i].res_priv);

	}

	return rc;
}
2594
2595
2596int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
2597{
2598 int rc = -EINVAL;
2599 uint32_t i;
2600
2601 if (!ife_csid_hw) {
2602 pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
2603 return rc;
2604 }
2605
2606 /* release the privdate data memory from resources */
2607 kfree(ife_csid_hw->ipp_res.res_priv);
2608 for (i = 0; i <
2609 ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
2610 i++) {
2611 kfree(ife_csid_hw->rdi_res[i].res_priv);
2612 }
2613 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
2614 kfree(ife_csid_hw->cid_res[i].res_priv);
2615
Ravikishore Pampanaad6bc902017-07-12 19:37:06 +05302616 cam_ife_csid_deinit_soc_resources(&ife_csid_hw->hw_info->soc_info);
Jing Zhouff57d862017-03-21 00:54:25 -07002617
2618 return 0;
2619}
2620
2621