/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/iopoll.h>
14#include <linux/slab.h>
15#include <uapi/media/cam_isp.h>
16#include <uapi/media/cam_defs.h>
17
18#include "cam_ife_csid_core.h"
19#include "cam_isp_hw.h"
20#include "cam_soc_util.h"
21#include "cam_io_util.h"
22
/* Route this file's CDBG() debug logging through dynamic debug. */
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)


/* Timeout value in msec (reset-done completion waits) */
#define IFE_CSID_TIMEOUT			1000

/*
 * TPG VC/DT values: fixed virtual channel / data type programmed when the
 * input is the test pattern generator (0x2B is the MIPI CSI-2 RAW10 DT
 * code per the CSI-2 spec).
 */
#define CAM_IFE_CSID_TPG_VC_VAL			0xA
#define CAM_IFE_CSID_TPG_DT_VAL			0x2B

/* Timeout values in usec (register polling: sleep interval / total) */
#define CAM_IFE_CSID_TIMEOUT_SLEEP_US		1000
#define CAM_IFE_CSID_TIMEOUT_ALL_US		1000000
37
38static int cam_ife_csid_is_ipp_format_supported(
39 uint32_t decode_fmt)
40{
41 int rc = -EINVAL;
42
43 switch (decode_fmt) {
44 case CAM_FORMAT_MIPI_RAW_6:
45 case CAM_FORMAT_MIPI_RAW_8:
46 case CAM_FORMAT_MIPI_RAW_10:
47 case CAM_FORMAT_MIPI_RAW_12:
48 case CAM_FORMAT_MIPI_RAW_14:
49 case CAM_FORMAT_MIPI_RAW_16:
50 case CAM_FORMAT_MIPI_RAW_20:
51 case CAM_FORMAT_DPCM_10_6_10:
52 case CAM_FORMAT_DPCM_10_8_10:
53 case CAM_FORMAT_DPCM_12_6_12:
54 case CAM_FORMAT_DPCM_12_8_12:
55 case CAM_FORMAT_DPCM_14_8_14:
56 case CAM_FORMAT_DPCM_14_10_14:
57 rc = 0;
58 break;
59 default:
60 break;
61 }
62 return rc;
63}
64
65static int cam_ife_csid_get_format(uint32_t res_id,
66 uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
67{
68 int rc = 0;
69
70 if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
71 res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
72 *path_fmt = 0xf;
73 return 0;
74 }
75
76 switch (decode_fmt) {
77 case CAM_FORMAT_MIPI_RAW_6:
78 *path_fmt = 0;
79 *plain_fmt = 0;
80 break;
81 case CAM_FORMAT_MIPI_RAW_8:
82 *path_fmt = 1;
83 *plain_fmt = 0;
84 break;
85 case CAM_FORMAT_MIPI_RAW_10:
86 *path_fmt = 2;
87 *plain_fmt = 1;
88 break;
89 case CAM_FORMAT_MIPI_RAW_12:
90 *path_fmt = 3;
91 *plain_fmt = 1;
92 break;
93 case CAM_FORMAT_MIPI_RAW_14:
94 *path_fmt = 4;
95 *plain_fmt = 1;
96 break;
97 case CAM_FORMAT_MIPI_RAW_16:
98 *path_fmt = 5;
99 *plain_fmt = 1;
100 break;
101 case CAM_FORMAT_MIPI_RAW_20:
102 *path_fmt = 6;
103 *plain_fmt = 2;
104 break;
105 case CAM_FORMAT_DPCM_10_6_10:
106 *path_fmt = 7;
107 *plain_fmt = 1;
108 break;
109 case CAM_FORMAT_DPCM_10_8_10:
110 *path_fmt = 8;
111 *plain_fmt = 1;
112 break;
113 case CAM_FORMAT_DPCM_12_6_12:
114 *path_fmt = 9;
115 *plain_fmt = 1;
116 break;
117 case CAM_FORMAT_DPCM_12_8_12:
118 *path_fmt = 0xA;
119 *plain_fmt = 1;
120 break;
121 case CAM_FORMAT_DPCM_14_8_14:
122 *path_fmt = 0xB;
123 *plain_fmt = 1;
124 break;
125 case CAM_FORMAT_DPCM_14_10_14:
126 *path_fmt = 0xC;
127 *plain_fmt = 1;
128 break;
129 default:
130 pr_err("%s:%d:CSID:%d un supported format\n",
131 __func__, __LINE__, decode_fmt);
132 rc = -EINVAL;
133 }
134
135 return rc;
136}
137
138static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
139 struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
140 uint32_t res_type)
141{
142 int rc = 0;
143 struct cam_ife_csid_cid_data *cid_data;
144 uint32_t i = 0, j = 0;
145
146 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
147 if (csid_hw->cid_res[i].res_state >=
148 CAM_ISP_RESOURCE_STATE_RESERVED) {
149 cid_data = (struct cam_ife_csid_cid_data *)
150 csid_hw->cid_res[i].res_priv;
151 if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
152 if (cid_data->tpg_set) {
153 cid_data->cnt++;
154 *res = &csid_hw->cid_res[i];
155 break;
156 }
157 } else {
158 if (cid_data->vc == vc && cid_data->dt == dt) {
159 cid_data->cnt++;
160 *res = &csid_hw->cid_res[i];
161 break;
162 }
163 }
164 }
165 }
166
167 if (i == CAM_IFE_CSID_CID_RES_MAX) {
168 if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
169 pr_err("%s:%d:CSID:%d TPG CID not available\n",
170 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
171 rc = -EINVAL;
172 }
173
174 for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
175 if (csid_hw->cid_res[j].res_state ==
176 CAM_ISP_RESOURCE_STATE_AVAILABLE) {
177 cid_data = (struct cam_ife_csid_cid_data *)
178 csid_hw->cid_res[j].res_priv;
179 cid_data->vc = vc;
180 cid_data->dt = dt;
181 cid_data->cnt = 1;
182 csid_hw->cid_res[j].res_state =
183 CAM_ISP_RESOURCE_STATE_RESERVED;
184 *res = &csid_hw->cid_res[j];
185 CDBG("%s:%d:CSID:%d CID %d allocated\n",
186 __func__, __LINE__,
187 csid_hw->hw_intf->hw_idx,
188 csid_hw->cid_res[j].res_id);
189 break;
190 }
191 }
192
193 if (j == CAM_IFE_CSID_CID_RES_MAX) {
194 pr_err("%s:%d:CSID:%d Free cid is not available\n",
195 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
196 rc = -EINVAL;
197 }
198 }
199
200 return rc;
201}
202
203
/*
 * cam_ife_csid_global_reset() - full top-level reset of the CSID HW.
 *
 * Sequence:
 *   1. save the current CSI2-RX / IPP / RDI IRQ mask registers,
 *   2. mask and clear every interrupt,
 *   3. kick the top reset strobe and wait for the reset-done interrupt
 *      (the IRQ handler signals csid_top_complete),
 *   4. restore the saved IRQ masks.
 *
 * Return: 0 on success, -EINVAL if the HW is not powered up, -ETIMEDOUT
 * if the reset-done interrupt does not arrive within IFE_CSID_TIMEOUT ms.
 */
static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
{
	struct cam_hw_soc_info          *soc_info;
	struct cam_ife_csid_reg_offset  *csid_reg;
	int rc = 0;
	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = csid_hw->csid_info->csid_reg;

	/* Reset only makes sense while the HW is powered up. */
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	/* Re-arm before the strobe; completed by the top-IRQ handler. */
	init_completion(&csid_hw->csid_top_complete);

	/* Save interrupt mask registers values*/
	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	/* IPP registers exist only when the variant has a pixel path. */
	if (csid_reg->cmn_reg->no_pix)
		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
	}

	/* Mask all interrupts */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);

	/* clear all interrupts */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);

	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);

	for (i = 0 ; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);

	/* Latch the clear writes via the IRQ command register. */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	/*
	 * NOTE(review): sets bit 7 of the CSI2 RX cfg1 register ahead of
	 * the reset; the meaning of this bit is not visible in this file —
	 * confirm against the CSID HW register spec.
	 */
	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);

	/* enable the IPP and RDI format measure */
	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_cfg0_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);

	/* perform the top CSID HW reset */
	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
		__func__, __LINE__);

	/* Returns 0 on timeout, >0 (remaining jiffies) on completion. */
	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
		if (rc == 0)
			rc = -ETIMEDOUT;
	} else {
		rc = 0;
	}

	/*restore all interrupt masks */
	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);

	return rc;
}
318
/*
 * cam_ife_csid_path_reset() - reset a single CSID path (IPP or RDI).
 *
 * Enables the path's reset-done interrupt, strobes the per-path reset
 * register and waits for the completion signalled by the IRQ handler.
 * The TPG is briefly enabled around the strobe.
 *
 * Return: 0 on success, -EINVAL for bad state/resource, -ETIMEDOUT if
 * the reset-done interrupt does not arrive within IFE_CSID_TIMEOUT ms.
 */
static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_reset_cfg_args  *reset)
{
	int rc = 0;
	struct cam_hw_soc_info          *soc_info;
	struct cam_isp_resource_node    *res;
	struct cam_ife_csid_reg_offset  *csid_reg;
	uint32_t  reset_strb_addr, reset_strb_val, val, id;
	struct completion  *complete;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	res      = reset->node_res;

	/* Path reset requires the HW to be powered up. */
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_id);

	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		/* Guard: some CSID variants have no pixel path. */
		if (!csid_reg->ipp_reg) {
			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
			return -EINVAL;
		}

		reset_strb_addr = csid_reg->ipp_reg->csid_ipp_rst_strobes_addr;
		complete = &csid_hw->csid_ipp_complete;

		/* Enable path reset done interrupt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
		val |= CSID_PATH_INFO_RST_DONE;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	} else {
		/* RDI path: res_id doubles as the rdi_reg[] index. */
		id = res->res_id;
		if (!csid_reg->rdi_reg[id]) {
			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
				res->res_id);
			return -EINVAL;
		}

		reset_strb_addr =
			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
		complete =
			&csid_hw->csid_rdin_complete[id];

		/* Enable path reset done interrupt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
		val |= CSID_PATH_INFO_RST_DONE;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	}

	/* Re-arm before the strobe; completed by the path IRQ handler. */
	init_completion(complete);
	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;

	/*
	 * Enable the Test gen before reset.
	 * NOTE(review): csid_reg->tpg_reg is dereferenced here without a
	 * NULL check, unlike ipp_reg/rdi_reg above — assumes the TPG block
	 * is always present; confirm for all CSID variants.
	 */
	cam_io_w_mb(1, csid_hw->hw_info->soc_info.reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_ctrl_addr);

	/* Reset the corresponding ife csid path */
	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
				reset_strb_addr);

	/* Returns 0 on timeout, >0 (remaining jiffies) on completion. */
	rc = wait_for_completion_timeout(complete,
		msecs_to_jiffies(IFE_CSID_TIMEOUT));
	if (rc <= 0) {
		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id,  rc);
		if (rc == 0)
			rc = -ETIMEDOUT;
	}

	/* Disable Test Gen after reset*/
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_ctrl_addr);

end:
	return rc;

}
419
420static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
421 struct cam_csid_hw_reserve_resource_args *cid_reserv)
422{
423 int rc = 0;
424 struct cam_ife_csid_cid_data *cid_data;
425
426 CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
427 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
428 cid_reserv->in_port->res_type,
429 cid_reserv->in_port->lane_type,
430 cid_reserv->in_port->lane_num,
431 cid_reserv->in_port->dt,
432 cid_reserv->in_port->vc);
433
434 if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
435 pr_err("%s:%d:CSID:%d Invalid phy sel %d\n", __func__,
436 __LINE__, csid_hw->hw_intf->hw_idx,
437 cid_reserv->in_port->res_type);
438 rc = -EINVAL;
439 goto end;
440 }
441
442 if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
443 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
444 pr_err("%s:%d:CSID:%d Invalid lane type %d\n", __func__,
445 __LINE__, csid_hw->hw_intf->hw_idx,
446 cid_reserv->in_port->lane_type);
447 rc = -EINVAL;
448 goto end;
449 }
450
451 if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY &&
452 cid_reserv->in_port->lane_num > 4) &&
453 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
454 pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
455 __LINE__, csid_hw->hw_intf->hw_idx,
456 cid_reserv->in_port->lane_num);
457 rc = -EINVAL;
458 goto end;
459 }
460 if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
461 cid_reserv->in_port->lane_num > 3) &&
462 cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
463 pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
464 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
465 cid_reserv->in_port->lane_type,
466 cid_reserv->in_port->lane_num);
467 rc = -EINVAL;
468 goto end;
469 }
470
471 /* CSID CSI2 v2.0 supports 31 vc */
472 if (cid_reserv->in_port->dt > 0x3f ||
473 cid_reserv->in_port->vc > 0x1f) {
474 pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
475 __LINE__, csid_hw->hw_intf->hw_idx,
476 cid_reserv->in_port->vc, cid_reserv->in_port->dt);
477 rc = -EINVAL;
478 goto end;
479 }
480
481 if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
482 (cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
483 cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
484 pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
485 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
486 cid_reserv->in_port->format);
487 rc = -EINVAL;
488 goto end;
489 }
490
491 if (csid_hw->csi2_reserve_cnt) {
492 /* current configure res type should match requested res type */
493 if (csid_hw->res_type != cid_reserv->in_port->res_type) {
494 rc = -EINVAL;
495 goto end;
496 }
497
498 if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
499 if (csid_hw->csi2_rx_cfg.lane_cfg !=
500 cid_reserv->in_port->lane_cfg ||
501 csid_hw->csi2_rx_cfg.lane_type !=
502 cid_reserv->in_port->lane_type ||
503 csid_hw->csi2_rx_cfg.lane_num !=
504 cid_reserv->in_port->lane_num) {
505 rc = -EINVAL;
506 goto end;
507 }
508 } else {
509 if (csid_hw->tpg_cfg.decode_fmt !=
510 cid_reserv->in_port->format ||
511 csid_hw->tpg_cfg.width !=
512 cid_reserv->in_port->left_width ||
513 csid_hw->tpg_cfg.height !=
514 cid_reserv->in_port->height ||
515 csid_hw->tpg_cfg.test_pattern !=
516 cid_reserv->in_port->test_pattern) {
517 rc = -EINVAL;
518 goto end;
519 }
520 }
521 }
522
523 if (!csid_hw->csi2_reserve_cnt) {
524 csid_hw->res_type = cid_reserv->in_port->res_type;
525 /* Take the first CID resource*/
526 csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
527 cid_data = (struct cam_ife_csid_cid_data *)
528 csid_hw->cid_res[0].res_priv;
529
530 csid_hw->csi2_rx_cfg.lane_cfg =
531 cid_reserv->in_port->lane_cfg;
532 csid_hw->csi2_rx_cfg.lane_type =
533 cid_reserv->in_port->lane_type;
534 csid_hw->csi2_rx_cfg.lane_num =
535 cid_reserv->in_port->lane_num;
536
537 if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
538 csid_hw->csi2_rx_cfg.phy_sel = 0;
539 if (cid_reserv->in_port->format >
540 CAM_FORMAT_MIPI_RAW_16) {
541 pr_err("%s:%d: Wrong TPG format\n", __func__,
542 __LINE__);
543 rc = -EINVAL;
544 goto end;
545 }
546 csid_hw->tpg_cfg.decode_fmt =
547 cid_reserv->in_port->format;
548 csid_hw->tpg_cfg.width =
549 cid_reserv->in_port->left_width;
550 csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
551 csid_hw->tpg_cfg.test_pattern =
552 cid_reserv->in_port->test_pattern;
553 cid_data->tpg_set = 1;
554 } else {
555 csid_hw->csi2_rx_cfg.phy_sel =
556 (cid_reserv->in_port->res_type & 0xFF) - 1;
557 }
558
559 cid_data->vc = cid_reserv->in_port->vc;
560 cid_data->dt = cid_reserv->in_port->dt;
561 cid_data->cnt = 1;
562 cid_reserv->node_res = &csid_hw->cid_res[0];
563 csid_hw->csi2_reserve_cnt++;
564
565 CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
566 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
567 cid_reserv->node_res->res_id);
568 } else {
569 rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
570 cid_reserv->in_port->vc, cid_reserv->in_port->dt,
571 cid_reserv->in_port->res_type);
572 /* if success then increment the reserve count */
573 if (!rc) {
574 if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
575 pr_err("%s:%d:CSID%d reserve cnt reached max\n",
576 __func__, __LINE__,
577 csid_hw->hw_intf->hw_idx);
578 rc = -EINVAL;
579 } else {
580 csid_hw->csi2_reserve_cnt++;
581 CDBG("%s:%d:CSID:%d CID:%d acquired\n",
582 __func__, __LINE__,
583 csid_hw->hw_intf->hw_idx,
584 cid_reserv->node_res->res_id);
585 }
586 }
587 }
588
589end:
590 return rc;
591}
592
593
594static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
595 struct cam_csid_hw_reserve_resource_args *reserve)
596{
597 int rc = 0;
598 struct cam_ife_csid_path_cfg *path_data;
599 struct cam_isp_resource_node *res;
600
601 /* CSID CSI2 v2.0 supports 31 vc */
602 if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
603 (reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
604 pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
605 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
606 reserve->in_port->vc, reserve->in_port->dt,
607 reserve->sync_mode);
608 rc = -EINVAL;
609 goto end;
610 }
611
612 switch (reserve->res_id) {
613 case CAM_IFE_PIX_PATH_RES_IPP:
614 if (csid_hw->ipp_res.res_state !=
615 CAM_ISP_RESOURCE_STATE_AVAILABLE) {
616 CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
617 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
618 csid_hw->ipp_res.res_state);
619 rc = -EINVAL;
620 goto end;
621 }
622
623 if (cam_ife_csid_is_ipp_format_supported(
624 reserve->in_port->format)) {
625 pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
626 __func__, __LINE__,
627 csid_hw->hw_intf->hw_idx, reserve->res_id,
628 reserve->in_port->format);
629 rc = -EINVAL;
630 goto end;
631 }
632
633 /* assign the IPP resource */
634 res = &csid_hw->ipp_res;
635 CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
636 __func__, __LINE__,
637 csid_hw->hw_intf->hw_idx, res->res_id);
638
639 break;
640 case CAM_IFE_PIX_PATH_RES_RDI_0:
641 case CAM_IFE_PIX_PATH_RES_RDI_1:
642 case CAM_IFE_PIX_PATH_RES_RDI_2:
643 case CAM_IFE_PIX_PATH_RES_RDI_3:
644 if (csid_hw->rdi_res[reserve->res_id].res_state !=
645 CAM_ISP_RESOURCE_STATE_AVAILABLE) {
646 CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
647 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
648 reserve->res_id,
649 csid_hw->rdi_res[reserve->res_id].res_state);
650 rc = -EINVAL;
651 goto end;
652 } else {
653 res = &csid_hw->rdi_res[reserve->res_id];
654 CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
655 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
656 res->res_id);
657 }
658
659 break;
660 default:
661 pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
662 __func__, __LINE__,
663 csid_hw->hw_intf->hw_idx, reserve->res_id);
664 rc = -EINVAL;
665 goto end;
666 }
667
668 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
669 path_data = (struct cam_ife_csid_path_cfg *)res->res_priv;
670
671 path_data->cid = reserve->cid;
672 path_data->decode_fmt = reserve->in_port->format;
673 path_data->master_idx = reserve->master_idx;
674 path_data->sync_mode = reserve->sync_mode;
675 path_data->height = reserve->in_port->height;
676 path_data->start_line = reserve->in_port->line_start;
677 if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
678 path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
679 path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
680 } else {
681 path_data->dt = reserve->in_port->dt;
682 path_data->vc = reserve->in_port->vc;
683 }
684
685 if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
686 path_data->crop_enable = 1;
687 path_data->start_pixel = reserve->in_port->left_start;
688 path_data->width = reserve->in_port->left_width;
689 } else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
690 path_data->crop_enable = 1;
691 path_data->start_pixel = reserve->in_port->right_start;
692 path_data->width = reserve->in_port->right_width;
693 } else
694 path_data->crop_enable = 0;
695
696 reserve->node_res = res;
697
698end:
699 return rc;
700}
701
/*
 * cam_ife_csid_enable_hw() - ref-counted power-up and init of the CSID.
 *
 * Only the first caller actually enables SOC resources, performs the
 * global HW reset, the SW-register reset (polled, since that reset also
 * clears the IRQ masks), clears pending interrupts and re-enables the
 * top IRQ.  Later callers just bump the open count.
 *
 * Return: 0 on success, -EINVAL on refcount overflow, -ETIMEDOUT when a
 * reset does not complete, or the SOC-enable error code.
 */
static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw  *csid_hw)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info              *soc_info;
	uint32_t i, status, val;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	/* overflow check before increment */
	if (csid_hw->hw_info->open_count == UINT_MAX) {
		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	/* Increment ref Count */
	csid_hw->hw_info->open_count++;
	if (csid_hw->hw_info->open_count > 1) {
		CDBG("%s:%d: CSID hw has already been enabled\n",
			__func__, __LINE__);
		return rc;
	}

	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	/* Clocks/regulators on; on failure only the refcount is undone. */
	rc = cam_ife_csid_enable_soc_resources(soc_info);
	if (rc) {
		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
			csid_hw->hw_intf->hw_idx);
		goto err;
	}


	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
	/* Enable the top IRQ interrupt */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);

	rc = cam_ife_csid_global_reset(csid_hw);
	if (rc) {
		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
			 __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
		rc = -ETIMEDOUT;
		goto disable_soc;
	}

	/*
	 * Reset the SW registers
	 * SW register reset also reset the mask irq, so poll the irq status
	 * to check the reset complete.
	 */
	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx);

	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	/* Poll bit 0 of the top IRQ status for SW-reset done. */
	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_status_addr,
			status, (status & 0x1) == 0x1,
		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
	if (rc < 0) {
		pr_err("%s:%d: software register reset timeout.....\n",
			__func__, __LINE__);
		rc = -ETIMEDOUT;
		goto disable_soc;
	}

	/* clear all interrupts */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);

	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
		soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);

	/* IPP registers exist only when the variant has a pixel path. */
	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);

	/* Latch the clear writes via the IRQ command register. */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	/* Enable the top IRQ interrupt */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);

	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->cmn_reg->csid_hw_version_addr);
	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, val);

	return 0;

disable_soc:
	cam_ife_csid_disable_soc_resources(soc_info);
	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
err:
	csid_hw->hw_info->open_count--;
	return rc;
}
816
817static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
818{
819 int rc = 0;
820 struct cam_hw_soc_info *soc_info;
821 struct cam_ife_csid_reg_offset *csid_reg;
822
823
824 /* Decrement ref Count */
825 if (csid_hw->hw_info->open_count)
826 csid_hw->hw_info->open_count--;
827 if (csid_hw->hw_info->open_count)
828 return rc;
829
830 soc_info = &csid_hw->hw_info->soc_info;
831 csid_reg = csid_hw->csid_info->csid_reg;
832
833 CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
834 csid_hw->hw_intf->hw_idx);
835
836 /*disable the top IRQ interrupt */
837 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
838 csid_reg->cmn_reg->csid_top_irq_mask_addr);
839
840 rc = cam_ife_csid_disable_soc_resources(soc_info);
841 if (rc)
842 pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
843 __LINE__, csid_hw->hw_intf->hw_idx);
844
845 csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
846 return rc;
847}
848
849
850static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw *csid_hw,
851 struct cam_isp_resource_node *res)
852{
853 uint32_t val = 0;
854 struct cam_hw_soc_info *soc_info;
855
856 csid_hw->tpg_start_cnt++;
857 if (csid_hw->tpg_start_cnt == 1) {
858 /*Enable the TPG */
859 CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
860 __LINE__, csid_hw->hw_intf->hw_idx);
861
862 soc_info = &csid_hw->hw_info->soc_info;
863 {
864 uint32_t val;
865 uint32_t i;
866 uint32_t base = 0x600;
867
868 CDBG("%s:%d: ================== TPG ===============\n",
869 __func__, __LINE__);
870 for (i = 0; i < 16; i++) {
871 val = cam_io_r_mb(
872 soc_info->reg_map[0].mem_base +
873 base + i * 4);
874 CDBG("%s:%d reg 0x%x = 0x%x\n",
875 __func__, __LINE__,
876 (base + i*4), val);
877 }
878
879 CDBG("%s:%d: ================== IPP ===============\n",
880 __func__, __LINE__);
881 base = 0x200;
882 for (i = 0; i < 10; i++) {
883 val = cam_io_r_mb(
884 soc_info->reg_map[0].mem_base +
885 base + i * 4);
886 CDBG("%s:%d reg 0x%x = 0x%x\n",
887 __func__, __LINE__,
888 (base + i*4), val);
889 }
890
891 CDBG("%s:%d: ================== RX ===============\n",
892 __func__, __LINE__);
893 base = 0x100;
894 for (i = 0; i < 5; i++) {
895 val = cam_io_r_mb(
896 soc_info->reg_map[0].mem_base +
897 base + i * 4);
898 CDBG("%s:%d reg 0x%x = 0x%x\n",
899 __func__, __LINE__,
900 (base + i*4), val);
901 }
902 }
903
904 CDBG("%s:%d: =============== TPG control ===============\n",
905 __func__, __LINE__);
906 val = (4 << 20);
907 val |= (0x80 << 8);
908 val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
909 val |= 7;
910 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
911 csid_hw->csid_info->csid_reg->tpg_reg->
912 csid_tpg_ctrl_addr);
913
914 val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
915 CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
916 0x600, val);
917 }
918
919 return 0;
920}
921
922static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw *csid_hw,
923 struct cam_isp_resource_node *res)
924{
925 struct cam_hw_soc_info *soc_info;
926
927 if (csid_hw->tpg_start_cnt)
928 csid_hw->tpg_start_cnt--;
929
930 if (csid_hw->tpg_start_cnt)
931 return 0;
932
933 soc_info = &csid_hw->hw_info->soc_info;
934
935 /* disable the TPG */
936 if (!csid_hw->tpg_start_cnt) {
937 CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
938 __LINE__, csid_hw->hw_intf->hw_idx);
939
940 /*stop the TPG */
941 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
942 csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
943 }
944
945 return 0;
946}
947
948
/*
 * cam_ife_csid_config_tpg() - program the test pattern generator with the
 * session parameters latched in csid_hw->tpg_cfg (VC/DT, frame geometry,
 * blanking, LFSR seed, decode format, pattern).
 *
 * Return: always 0.
 */
static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw   *csid_hw,
	struct cam_isp_resource_node       *res)
{
	struct cam_ife_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info         *soc_info;
	uint32_t val = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	CDBG("%s:%d CSID:%d TPG config\n", __func__,
		__LINE__, csid_hw->hw_intf->hw_idx);

	/* configure one DT, infinite frames */
	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);

	/* vertical blanking count = 0x740, horzontal blanking count = 0x740*/
	val = (0x740 << 12) | 0x740;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);

	/* Arbitrary non-zero seed for the random-pattern generator. */
	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);

	/* Frame geometry: width in the high half-word, height in the low. */
	val = csid_hw->tpg_cfg.width << 16 |
		csid_hw->tpg_cfg.height;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);

	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);

	/*
	 * decode_fmt is the same as the input resource format.
	 * it is one larger than the register spec format.
	 */
	/* NOTE(review): the meaning of the low bits 0x8 is not evident
	 * from this file — confirm against the TPG register spec. */
	val = ((csid_hw->tpg_cfg.decode_fmt - 1) << 16) | 0x8;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);

	/* static frame with split color bar */
	val =  1 << 5;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
	/* config pix pattern */
	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
		soc_info->reg_map[0].mem_base +
		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);

	return 0;
}
1002
/*
 * cam_ife_csid_enable_csi2() - ref-counted enable of the CSI2 receiver.
 *
 * Marks the CID resource streaming; only the first caller programs the
 * RX cfg0/cfg1 registers, (optionally) the TPG, and unmasks the RX
 * error/reset interrupts.
 *
 * Return: 0 on success, -EINVAL on refcount overflow, or the TPG config
 * error code.
 */
static int cam_ife_csid_enable_csi2(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	int rc = 0;
	struct cam_ife_csid_reg_offset       *csid_reg;
	struct cam_hw_soc_info               *soc_info;
	struct cam_ife_csid_cid_data         *cid_data;
	uint32_t val = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);

	/* overflow check before increment */
	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;

	res->res_state  = CAM_ISP_RESOURCE_STATE_STREAMING;
	csid_hw->csi2_cfg_cnt++;
	/* HW already configured by an earlier user; refcount only. */
	if (csid_hw->csi2_cfg_cnt > 1)
		return rc;

	/* rx cfg0 */
	/*
	 * NOTE(review): phy_sel is OR-ed into bits [1:0], which already
	 * hold (lane_num - 1) — looks suspicious; confirm the phy select
	 * field position against the RX cfg0 register spec.
	 */
	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
		(csid_hw->csi2_rx_cfg.lane_type << 24);
	val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);

	/* rx cfg1*/
	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
	/* if VC value is more than 3 than set full width of VC */
	if (cid_data->vc > 3)
		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);

	/* enable packet ecc correction */
	val |= 1;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);

	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
		/* Config the TPG */
		rc = cam_ife_csid_config_tpg(csid_hw, res);
		if (rc) {
			/* Roll the CID back out of the streaming state. */
			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
			return rc;
		}
	}

	/*Enable the CSI2 rx inerrupts */
	val = CSID_CSI2_RX_INFO_RST_DONE |
		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	return 0;
}
1075
1076static int cam_ife_csid_disable_csi2(
1077 struct cam_ife_csid_hw *csid_hw,
1078 struct cam_isp_resource_node *res)
1079{
1080 struct cam_ife_csid_reg_offset *csid_reg;
1081 struct cam_hw_soc_info *soc_info;
1082
1083 if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
1084 pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
1085 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1086 return -EINVAL;
1087 }
1088
1089 csid_reg = csid_hw->csid_info->csid_reg;
1090 soc_info = &csid_hw->hw_info->soc_info;
1091 CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
1092 __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
1093
1094 if (csid_hw->csi2_cfg_cnt)
1095 csid_hw->csi2_cfg_cnt--;
1096
1097 if (csid_hw->csi2_cfg_cnt)
1098 return 0;
1099
1100 /*Disable the CSI2 rx inerrupts */
1101 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1102 csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
1103
1104 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1105
1106 return 0;
1107}
1108
1109static int cam_ife_csid_init_config_ipp_path(
1110 struct cam_ife_csid_hw *csid_hw,
1111 struct cam_isp_resource_node *res)
1112{
1113 int rc = 0;
1114 struct cam_ife_csid_path_cfg *path_data;
1115 struct cam_ife_csid_reg_offset *csid_reg;
1116 struct cam_hw_soc_info *soc_info;
1117 uint32_t path_format = 0, plain_format = 0, val = 0;
1118
1119 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1120 csid_reg = csid_hw->csid_info->csid_reg;
1121 soc_info = &csid_hw->hw_info->soc_info;
1122
1123 if (!csid_reg->ipp_reg) {
1124 pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
1125 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1126 res->res_id);
1127 return -EINVAL;
1128 }
1129
1130 CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
1131 rc = cam_ife_csid_get_format(res->res_id,
1132 path_data->decode_fmt, &path_format, &plain_format);
1133 if (rc)
1134 return rc;
1135
Jing Zhoubb536a82017-05-18 15:20:38 -07001136 /*
Jing Zhouff57d862017-03-21 00:54:25 -07001137 * configure the IPP and enable the time stamp capture.
1138 * enable the HW measrurement blocks
1139 */
1140 val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
1141 (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
1142 (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
1143 (path_format << csid_reg->cmn_reg->fmt_shift_val) |
1144 (path_data->crop_enable & 1 <<
1145 csid_reg->cmn_reg->crop_h_en_shift_val) |
1146 (path_data->crop_enable & 1 <<
1147 csid_reg->cmn_reg->crop_v_en_shift_val) |
1148 (1 << 1) | 1;
1149 val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
1150 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1151 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1152
Jing Zhoudedc4762017-06-19 17:45:36 +05301153 /* select the post irq sub sample strobe for time stamp capture */
1154 cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
1155 csid_reg->ipp_reg->csid_ipp_cfg1_addr);
1156
Jing Zhouff57d862017-03-21 00:54:25 -07001157 if (path_data->crop_enable) {
1158 val = ((path_data->width +
1159 path_data->start_pixel) & 0xFFFF <<
1160 csid_reg->cmn_reg->crop_shift) |
1161 (path_data->start_pixel & 0xFFFF);
1162
1163 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1164 csid_reg->ipp_reg->csid_ipp_hcrop_addr);
1165
1166 val = ((path_data->height +
1167 path_data->start_line) & 0xFFFF <<
1168 csid_reg->cmn_reg->crop_shift) |
1169 (path_data->start_line & 0xFFFF);
1170
1171 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1172 csid_reg->ipp_reg->csid_ipp_vcrop_addr);
1173 }
1174
1175 /* set frame drop pattern to 0 and period to 1 */
1176 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1177 csid_reg->ipp_reg->csid_ipp_frm_drop_period_addr);
1178 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1179 csid_reg->ipp_reg->csid_ipp_frm_drop_pattern_addr);
1180 /* set irq sub sample pattern to 0 and period to 1 */
1181 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1182 csid_reg->ipp_reg->csid_ipp_irq_subsample_period_addr);
1183 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1184 csid_reg->ipp_reg->csid_ipp_irq_subsample_pattern_addr);
1185 /* set pixel drop pattern to 0 and period to 1 */
1186 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1187 csid_reg->ipp_reg->csid_ipp_pix_drop_pattern_addr);
1188 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1189 csid_reg->ipp_reg->csid_ipp_pix_drop_period_addr);
1190 /* set line drop pattern to 0 and period to 1 */
1191 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1192 csid_reg->ipp_reg->csid_ipp_line_drop_pattern_addr);
1193 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1194 csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
1195
1196 /*Set master or slave IPP */
1197 if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
1198 /*Set halt mode as master */
1199 val = CSID_HALT_MODE_MASTER << 2;
1200 else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
1201 /*Set halt mode as slave and set master idx */
1202 val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
1203 else
1204 /* Default is internal halt mode */
1205 val = 0;
1206
1207 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1208 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1209
1210 /* Enable the IPP path */
1211 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1212 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1213 val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
1214 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1215 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1216
1217 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1218
1219 return rc;
1220}
1221
1222static int cam_ife_csid_deinit_ipp_path(
1223 struct cam_ife_csid_hw *csid_hw,
1224 struct cam_isp_resource_node *res)
1225{
1226 int rc = 0;
1227 struct cam_ife_csid_reg_offset *csid_reg;
1228 struct cam_hw_soc_info *soc_info;
1229 uint32_t val = 0;
1230
1231 csid_reg = csid_hw->csid_info->csid_reg;
1232 soc_info = &csid_hw->hw_info->soc_info;
1233
1234 if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
1235 pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
1236 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1237 res->res_type, res->res_id, res->res_state);
1238 rc = -EINVAL;
1239 }
1240
1241 if (!csid_reg->ipp_reg) {
1242 pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
1243 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1244 res->res_id);
1245 rc = -EINVAL;
1246 }
1247
1248 /* Disable the IPP path */
1249 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1250 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1251 val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
1252 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1253 csid_reg->ipp_reg->csid_ipp_cfg0_addr);
1254
1255 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1256 return rc;
1257}
1258
/*
 * cam_ife_csid_enable_ipp_path() - start streaming on the IPP path.
 *
 * Resumes the path at the next frame boundary (master and standalone
 * modes only; a slave resumes from its master), unmasks the IPP
 * interrupts, and moves the resource to STREAMING.
 *
 * Returns 0 on success, -EINVAL on wrong state or missing IPP support.
 */
static int cam_ife_csid_enable_ipp_path(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info              *soc_info;
	struct cam_ife_csid_path_cfg        *path_data;
	uint32_t val = 0;

	path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	/* Path must have been through init_config (INIT_HW) first */
	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		return -EINVAL;
	}

	if (!csid_reg->ipp_reg) {
		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id);
		return -EINVAL;
	}

	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);

	/*Resume at frame boundary */
	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
		/* master: preserve the existing halt-mode bits in ctrl */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
			soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	}
	/* for slave mode, not need to resume for slave device */

	/* Enable the required ipp interrupts */
	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
		CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return 0;
}
1312
1313static int cam_ife_csid_disable_ipp_path(
1314 struct cam_ife_csid_hw *csid_hw,
1315 struct cam_isp_resource_node *res,
1316 enum cam_ife_csid_halt_cmd stop_cmd)
1317{
1318 int rc = 0;
1319 struct cam_ife_csid_reg_offset *csid_reg;
1320 struct cam_hw_soc_info *soc_info;
1321 struct cam_ife_csid_path_cfg *path_data;
1322 uint32_t val = 0;
1323
1324 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1325 csid_reg = csid_hw->csid_info->csid_reg;
1326 soc_info = &csid_hw->hw_info->soc_info;
1327
1328 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
1329 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1330 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1331 return -EINVAL;
1332 }
1333
1334 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1335 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1336 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1337 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1338 res->res_id, res->res_state);
1339 return rc;
1340 }
1341
1342 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1343 CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
1344 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1345 res->res_state);
1346 return -EINVAL;
1347 }
1348
1349 if (!csid_reg->ipp_reg) {
1350 pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
1351 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1352 return -EINVAL;
1353 }
1354
1355 if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
1356 stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1357 pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
1358 __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
1359 return -EINVAL;
1360 }
1361
1362 CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
1363 csid_hw->hw_intf->hw_idx, res->res_id);
1364
1365 if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
1366 /* configure Halt */
1367 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1368 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1369 val &= ~0x3;
1370 val |= stop_cmd;
1371 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1372 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1373 } else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
1374 cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
1375 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
1376
1377 /* For slave mode, halt command should take it from master */
1378
1379 /* Enable the EOF interrupt for resume at boundary case */
1380 if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1381 init_completion(&csid_hw->csid_ipp_complete);
1382 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1383 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1384 val |= CSID_PATH_INFO_INPUT_EOF;
1385 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1386 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1387 } else {
1388 val &= ~(CSID_PATH_INFO_RST_DONE |
1389 CSID_PATH_ERROR_FIFO_OVERFLOW);
1390 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1391 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1392 }
1393
1394 return rc;
1395}
1396
1397
1398static int cam_ife_csid_init_config_rdi_path(
1399 struct cam_ife_csid_hw *csid_hw,
1400 struct cam_isp_resource_node *res)
1401{
1402 int rc = 0;
1403 struct cam_ife_csid_path_cfg *path_data;
1404 struct cam_ife_csid_reg_offset *csid_reg;
1405 struct cam_hw_soc_info *soc_info;
1406 uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
1407
1408 path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
1409 csid_reg = csid_hw->csid_info->csid_reg;
1410 soc_info = &csid_hw->hw_info->soc_info;
1411
1412 id = res->res_id;
1413 if (!csid_reg->rdi_reg[id]) {
1414 pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
1415 __func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
1416 return -EINVAL;
1417 }
1418
1419 rc = cam_ife_csid_get_format(res->res_id,
1420 path_data->decode_fmt, &path_format, &plain_fmt);
1421 if (rc)
1422 return rc;
1423
Jing Zhoubb536a82017-05-18 15:20:38 -07001424 /*
Jing Zhouff57d862017-03-21 00:54:25 -07001425 * RDI path config and enable the time stamp capture
1426 * Enable the measurement blocks
1427 */
1428 val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
1429 (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
1430 (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
1431 (path_format << csid_reg->cmn_reg->fmt_shift_val) |
1432 (plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
1433 (path_data->crop_enable & 1 <<
1434 csid_reg->cmn_reg->crop_h_en_shift_val) |
1435 (path_data->crop_enable & 1 <<
1436 csid_reg->cmn_reg->crop_v_en_shift_val) |
1437 (1 << 2) | 3;
1438
1439 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1440 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1441
Jing Zhoudedc4762017-06-19 17:45:36 +05301442 /* select the post irq sub sample strobe for time stamp capture */
1443 cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
1444 csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
1445
Jing Zhouff57d862017-03-21 00:54:25 -07001446 if (path_data->crop_enable) {
1447 val = ((path_data->width +
1448 path_data->start_pixel) & 0xFFFF <<
1449 csid_reg->cmn_reg->crop_shift) |
1450 (path_data->start_pixel & 0xFFFF);
1451
1452 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1453 csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
1454
1455 val = ((path_data->height +
1456 path_data->start_line) & 0xFFFF <<
1457 csid_reg->cmn_reg->crop_shift) |
1458 (path_data->start_line & 0xFFFF);
1459
1460 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1461 csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
1462 }
1463 /* set frame drop pattern to 0 and period to 1 */
1464 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1465 csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
1466 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1467 csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
1468 /* set IRQ sum sabmple */
1469 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1470 csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
1471 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1472 csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
1473
1474 /* set pixel drop pattern to 0 and period to 1 */
1475 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1476 csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
1477 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1478 csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
1479 /* set line drop pattern to 0 and period to 1 */
1480 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1481 csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
1482 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
1483 csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
1484
1485 /* Configure the halt mode */
1486 cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
1487 csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
1488
1489 /* Enable the RPP path */
1490 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1491 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1492 val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
1493 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1494 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1495
1496 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1497
1498 return rc;
1499}
1500
1501static int cam_ife_csid_deinit_rdi_path(
1502 struct cam_ife_csid_hw *csid_hw,
1503 struct cam_isp_resource_node *res)
1504{
1505 int rc = 0;
1506 struct cam_ife_csid_reg_offset *csid_reg;
1507 struct cam_hw_soc_info *soc_info;
1508 uint32_t val = 0, id;
1509
1510 csid_reg = csid_hw->csid_info->csid_reg;
1511 soc_info = &csid_hw->hw_info->soc_info;
1512 id = res->res_id;
1513
1514 if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
1515 res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
1516 !csid_reg->rdi_reg[id]) {
1517 pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
1518 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1519 res->res_state);
1520 return -EINVAL;
1521 }
1522
1523 /* Disable the RDI path */
1524 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1525 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1526 val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
1527 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1528 csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
1529
1530 res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
1531 return rc;
1532}
1533
/*
 * cam_ife_csid_enable_rdi_path() - start streaming on an RDI path.
 *
 * Resumes the path at the next frame boundary, unmasks the RDI
 * interrupts, and moves the resource to STREAMING.
 *
 * Returns 0 on success, -EINVAL on wrong state or unsupported RDI id.
 */
static int cam_ife_csid_enable_rdi_path(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info              *soc_info;
	uint32_t id, val;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	id = res->res_id;

	/* Path must have been through init_config (INIT_HW) first */
	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
		!csid_reg->rdi_reg[id]) {
		pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		return -EINVAL;
	}

	/*resume at frame boundary */
	cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
			soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);

	/* Enable the required RDI interrupts */
	val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
		CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return 0;
}
1570
1571
1572static int cam_ife_csid_disable_rdi_path(
1573 struct cam_ife_csid_hw *csid_hw,
1574 struct cam_isp_resource_node *res,
1575 enum cam_ife_csid_halt_cmd stop_cmd)
1576{
1577 int rc = 0;
1578 struct cam_ife_csid_reg_offset *csid_reg;
1579 struct cam_hw_soc_info *soc_info;
1580 uint32_t val = 0, id;
1581
1582 csid_reg = csid_hw->csid_info->csid_reg;
1583 soc_info = &csid_hw->hw_info->soc_info;
1584 id = res->res_id;
1585
1586 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
1587 !csid_reg->rdi_reg[res->res_id]) {
1588 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1589 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1590 return -EINVAL;
1591 }
1592
1593 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1594 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1595 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1596 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1597 res->res_id, res->res_state);
1598 return rc;
1599 }
1600
1601 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1602 CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
1603 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1604 res->res_state);
1605 return -EINVAL;
1606 }
1607
1608 if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
1609 stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1610 pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
1611 __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
1612 return -EINVAL;
1613 }
1614
1615
1616 CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
1617 csid_hw->hw_intf->hw_idx, res->res_id);
1618
1619 init_completion(&csid_hw->csid_rdin_complete[id]);
1620
1621 if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
1622 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1623 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1624 val |= CSID_PATH_INFO_INPUT_EOF;
1625 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1626 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1627 } else {
1628 val &= ~(CSID_PATH_INFO_RST_DONE |
1629 CSID_PATH_ERROR_FIFO_OVERFLOW);
1630 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1631 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1632 }
1633
1634 /*Halt the RDI path */
1635 cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
1636 csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
1637
1638 return rc;
1639}
1640
/*
 * cam_ife_csid_get_time_stamp() - read the 64-bit SOF timestamp of a path.
 *
 * Reads the curr1 (high 32 bits) and curr0 (low 32 bits) SOF timestamp
 * registers of the IPP or RDI path named by cmd_args and composes them
 * into time_stamp->time_stamp_val.
 *
 * Returns 0 on success, -EINVAL for a bad resource or powered-down HW.
 */
static int cam_ife_csid_get_time_stamp(
		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
{
	struct cam_csid_get_time_stamp_args  *time_stamp;
	struct cam_isp_resource_node         *res;
	struct cam_ife_csid_reg_offset       *csid_reg;
	struct cam_hw_soc_info               *soc_info;
	uint32_t  time_32, id;

	time_stamp = (struct cam_csid_get_time_stamp_args  *)cmd_args;
	res = time_stamp->node_res;
	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	/* Registers are only accessible while the HW is powered up */
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	/*
	 * NOTE(review): the IPP branch dereferences csid_reg->ipp_reg
	 * without a NULL check, unlike the path init/enable functions —
	 * confirm that reservation guarantees IPP presence here.
	 */
	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		/* high word first, then shift and OR in the low word */
		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_timestamp_curr1_sof_addr);
		time_stamp->time_stamp_val = time_32;
		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_timestamp_curr0_sof_addr);
		time_stamp->time_stamp_val |= time_32;
	} else {
		id = res->res_id;
		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->
			csid_rdi_timestamp_curr1_sof_addr);
		time_stamp->time_stamp_val = time_32;
		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;

		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->
			csid_rdi_timestamp_curr0_sof_addr);
		time_stamp->time_stamp_val |= time_32;
	}

	return 0;
}
1694static int cam_ife_csid_res_wait_for_halt(
1695 struct cam_ife_csid_hw *csid_hw,
1696 struct cam_isp_resource_node *res)
1697{
1698 int rc = 0;
1699 struct cam_ife_csid_reg_offset *csid_reg;
1700 struct cam_hw_soc_info *soc_info;
1701
1702 struct completion *complete;
1703 uint32_t val = 0, id;
1704
1705 csid_reg = csid_hw->csid_info->csid_reg;
1706 soc_info = &csid_hw->hw_info->soc_info;
1707
1708 if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
1709 CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
1710 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
1711 return -EINVAL;
1712 }
1713
1714 if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
1715 res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
1716 CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
1717 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1718 res->res_id, res->res_state);
1719 return rc;
1720 }
1721
1722 if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
1723 CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
1724 __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
1725 res->res_state);
1726 return -EINVAL;
1727 }
1728
1729 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
1730 complete = &csid_hw->csid_ipp_complete;
1731 else
1732 complete = &csid_hw->csid_rdin_complete[res->res_id];
1733
1734 rc = wait_for_completion_timeout(complete,
1735 msecs_to_jiffies(IFE_CSID_TIMEOUT));
1736 if (rc <= 0) {
1737 pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
1738 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1739 res->res_id, rc);
1740 if (rc == 0)
1741 /* continue even have timeout */
1742 rc = -ETIMEDOUT;
1743 }
1744
1745 /* Disable the interrupt */
1746 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
1747 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1748 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1749 val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
1750 CSID_PATH_ERROR_FIFO_OVERFLOW);
1751 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1752 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
1753 } else {
1754 id = res->res_id;
1755 val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
1756 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1757 val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
1758 CSID_PATH_ERROR_FIFO_OVERFLOW);
1759 cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
1760 csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
1761 }
1762 /* set state to init HW */
1763 res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
1764 return rc;
1765}
1766
1767static int cam_ife_csid_get_hw_caps(void *hw_priv,
1768 void *get_hw_cap_args, uint32_t arg_size)
1769{
1770 int rc = 0;
1771 struct cam_ife_csid_hw_caps *hw_caps;
1772 struct cam_ife_csid_hw *csid_hw;
1773 struct cam_hw_info *csid_hw_info;
1774 struct cam_ife_csid_reg_offset *csid_reg;
1775
1776 if (!hw_priv || !get_hw_cap_args) {
1777 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1778 return -EINVAL;
1779 }
1780
1781 csid_hw_info = (struct cam_hw_info *)hw_priv;
1782 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1783 csid_reg = csid_hw->csid_info->csid_reg;
1784 hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
1785
1786 hw_caps->no_rdis = csid_reg->cmn_reg->no_rdis;
1787 hw_caps->no_pix = csid_reg->cmn_reg->no_pix;
1788 hw_caps->major_version = csid_reg->cmn_reg->major_version;
1789 hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
1790 hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
1791
1792 CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
1793 __func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
1794 hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
1795 hw_caps->version_incr);
1796
1797 return rc;
1798}
1799
1800static int cam_ife_csid_reset(void *hw_priv,
1801 void *reset_args, uint32_t arg_size)
1802{
1803 struct cam_ife_csid_hw *csid_hw;
1804 struct cam_hw_info *csid_hw_info;
1805 struct cam_csid_reset_cfg_args *reset;
1806 int rc = 0;
1807
1808 if (!hw_priv || !reset_args || (arg_size !=
1809 sizeof(struct cam_csid_reset_cfg_args))) {
1810 pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
1811 return -EINVAL;
1812 }
1813
1814 csid_hw_info = (struct cam_hw_info *)hw_priv;
1815 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1816 reset = (struct cam_csid_reset_cfg_args *)reset_args;
1817
1818 switch (reset->reset_type) {
1819 case CAM_IFE_CSID_RESET_GLOBAL:
1820 rc = cam_ife_csid_global_reset(csid_hw);
1821 break;
1822 case CAM_IFE_CSID_RESET_PATH:
1823 rc = cam_ife_csid_path_reset(csid_hw, reset);
1824 break;
1825 default:
1826 pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
1827 __LINE__, reset->reset_type);
1828 rc = -EINVAL;
1829 break;
1830 }
1831
1832 return rc;
1833}
1834
1835static int cam_ife_csid_reserve(void *hw_priv,
1836 void *reserve_args, uint32_t arg_size)
1837{
1838 int rc = 0;
1839 struct cam_ife_csid_hw *csid_hw;
1840 struct cam_hw_info *csid_hw_info;
1841 struct cam_csid_hw_reserve_resource_args *reserv;
1842
1843 if (!hw_priv || !reserve_args || (arg_size !=
1844 sizeof(struct cam_csid_hw_reserve_resource_args))) {
1845 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1846 return -EINVAL;
1847 }
1848
1849 csid_hw_info = (struct cam_hw_info *)hw_priv;
1850 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1851 reserv = (struct cam_csid_hw_reserve_resource_args *)reserve_args;
1852
1853 mutex_lock(&csid_hw->hw_info->hw_mutex);
1854 switch (reserv->res_type) {
1855 case CAM_ISP_RESOURCE_CID:
1856 rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
1857 break;
1858 case CAM_ISP_RESOURCE_PIX_PATH:
1859 rc = cam_ife_csid_path_reserve(csid_hw, reserv);
1860 break;
1861 default:
1862 pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
1863 __LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
1864 rc = -EINVAL;
1865 break;
1866 }
1867 mutex_unlock(&csid_hw->hw_info->hw_mutex);
1868 return rc;
1869}
1870
/*
 * cam_ife_csid_release() - release a previously reserved CSID resource.
 *
 * For a CID: drops one reference on the CID data and, when the last
 * reference goes, marks the resource AVAILABLE; also drops one csi2
 * reservation and clears the rx config when that count reaches zero.
 * For a pixel path: simply marks the resource AVAILABLE (it must be in
 * the RESERVED state).
 *
 * Returns 0 on success, -EINVAL for a bad resource id/type or a pixel
 * path in the wrong state.
 */
static int cam_ife_csid_release(void *hw_priv,
	void *release_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_hw          *csid_hw;
	struct cam_hw_info              *csid_hw_info;
	struct cam_isp_resource_node    *res;
	struct cam_ife_csid_cid_data    *cid_data;

	if (!hw_priv || !release_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)release_args;

	/* State changes below are serialized by the hw mutex */
	mutex_lock(&csid_hw->hw_info->hw_mutex);
	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}

	/* Releasing an already-available resource is a harmless no-op */
	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CDBG("%s:%d:CSID:%d res type:%d Res %d in released state\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id);
		goto end;
	}

	/* A pixel path must be RESERVED (i.e. stopped) before release */
	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		rc = -EINVAL;
		goto end;
	}

	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);

	switch (res->res_type) {
	case CAM_ISP_RESOURCE_CID:
		/* Drop one CID reference; last one frees the slot */
		cid_data = (struct cam_ife_csid_cid_data *) res->res_priv;
		if (cid_data->cnt)
			cid_data->cnt--;

		if (!cid_data->cnt)
			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

		/* Drop one csi2 reservation; last one clears the rx config */
		if (csid_hw->csi2_reserve_cnt)
			csid_hw->csi2_reserve_cnt--;

		if (!csid_hw->csi2_reserve_cnt)
			memset(&csid_hw->csi2_rx_cfg, 0,
				sizeof(struct cam_ife_csid_csi2_rx_cfg));

		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
			 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);

		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
		break;
	default:
		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		break;
	}

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}
1957
1958static int cam_ife_csid_init_hw(void *hw_priv,
1959 void *init_args, uint32_t arg_size)
1960{
1961 int rc = 0;
1962 struct cam_ife_csid_hw *csid_hw;
1963 struct cam_hw_info *csid_hw_info;
1964 struct cam_isp_resource_node *res;
1965 struct cam_ife_csid_reg_offset *csid_reg;
1966
1967 if (!hw_priv || !init_args ||
1968 (arg_size != sizeof(struct cam_isp_resource_node))) {
1969 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
1970 return -EINVAL;
1971 }
1972
1973 csid_hw_info = (struct cam_hw_info *)hw_priv;
1974 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
1975 res = (struct cam_isp_resource_node *)init_args;
1976 csid_reg = csid_hw->csid_info->csid_reg;
1977
1978 mutex_lock(&csid_hw->hw_info->hw_mutex);
1979 if ((res->res_type == CAM_ISP_RESOURCE_CID &&
1980 res->res_id >= CAM_IFE_CSID_CID_MAX) ||
1981 (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
1982 res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
1983 pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
1984 __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
1985 res->res_id);
1986 rc = -EINVAL;
1987 goto end;
1988 }
1989
1990
1991 if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
1992 (res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
1993 pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
1994 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
1995 res->res_type, res->res_id, res->res_state);
1996 rc = -EINVAL;
1997 goto end;
1998 }
1999
2000 CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
2001 csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
2002
2003
2004 /* Initialize the csid hardware */
2005 rc = cam_ife_csid_enable_hw(csid_hw);
2006 if (rc)
2007 goto end;
2008
2009 switch (res->res_type) {
2010 case CAM_ISP_RESOURCE_CID:
2011 rc = cam_ife_csid_enable_csi2(csid_hw, res);
2012 break;
2013 case CAM_ISP_RESOURCE_PIX_PATH:
2014 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2015 rc = cam_ife_csid_init_config_ipp_path(csid_hw, res);
2016 else
2017 rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);
2018
2019 break;
2020 default:
2021 pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
2022 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2023 res->res_type);
2024 break;
2025 }
2026
2027 if (rc)
2028 cam_ife_csid_disable_hw(csid_hw);
2029end:
2030 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2031 return rc;
2032}
2033
2034static int cam_ife_csid_deinit_hw(void *hw_priv,
2035 void *deinit_args, uint32_t arg_size)
2036{
2037 int rc = 0;
2038 struct cam_ife_csid_hw *csid_hw;
2039 struct cam_hw_info *csid_hw_info;
2040 struct cam_isp_resource_node *res;
2041
2042 if (!hw_priv || !deinit_args ||
2043 (arg_size != sizeof(struct cam_isp_resource_node))) {
2044 pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
2045 return -EINVAL;
2046 }
2047
2048 res = (struct cam_isp_resource_node *)deinit_args;
2049 csid_hw_info = (struct cam_hw_info *)hw_priv;
2050 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2051
2052 mutex_lock(&csid_hw->hw_info->hw_mutex);
2053 if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
2054 CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
2055 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2056 res->res_id);
2057 goto end;
2058 }
2059
2060 switch (res->res_type) {
2061 case CAM_ISP_RESOURCE_CID:
2062 rc = cam_ife_csid_disable_csi2(csid_hw, res);
2063 break;
2064 case CAM_ISP_RESOURCE_PIX_PATH:
2065 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2066 rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
2067 else
2068 rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
2069
2070 break;
2071 default:
2072 pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
2073 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2074 res->res_type);
2075 goto end;
2076 }
2077
2078 /* Disable CSID HW */
2079 cam_ife_csid_disable_hw(csid_hw);
2080
2081end:
2082 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2083 return rc;
2084}
2085
2086static int cam_ife_csid_start(void *hw_priv, void *start_args,
2087 uint32_t arg_size)
2088{
2089 int rc = 0;
2090 struct cam_ife_csid_hw *csid_hw;
2091 struct cam_hw_info *csid_hw_info;
2092 struct cam_isp_resource_node *res;
2093 struct cam_ife_csid_reg_offset *csid_reg;
2094
2095 if (!hw_priv || !start_args ||
2096 (arg_size != sizeof(struct cam_isp_resource_node))) {
2097 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
2098 return -EINVAL;
2099 }
2100
2101 csid_hw_info = (struct cam_hw_info *)hw_priv;
2102 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2103 res = (struct cam_isp_resource_node *)start_args;
2104 csid_reg = csid_hw->csid_info->csid_reg;
2105
2106 mutex_lock(&csid_hw->hw_info->hw_mutex);
2107 if ((res->res_type == CAM_ISP_RESOURCE_CID &&
2108 res->res_id >= CAM_IFE_CSID_CID_MAX) ||
2109 (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
2110 res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
2111 CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
2112 __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
2113 res->res_id);
2114 rc = -EINVAL;
2115 goto end;
2116 }
2117
2118 CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
2119 csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
2120
2121 switch (res->res_type) {
2122 case CAM_ISP_RESOURCE_CID:
2123 if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
2124 rc = cam_ife_csid_tpg_start(csid_hw, res);
2125 break;
2126 case CAM_ISP_RESOURCE_PIX_PATH:
2127 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2128 rc = cam_ife_csid_enable_ipp_path(csid_hw, res);
2129 else
2130 rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
2131 break;
2132 default:
2133 pr_err("%s:%d:CSID:%d Invalid res type%d\n",
2134 __func__, __LINE__, csid_hw->hw_intf->hw_idx,
2135 res->res_type);
2136 break;
2137 }
2138end:
2139 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2140 return rc;
2141}
2142
2143static int cam_ife_csid_stop(void *hw_priv,
2144 void *stop_args, uint32_t arg_size)
2145{
2146 int rc = 0;
2147 struct cam_ife_csid_hw *csid_hw;
2148 struct cam_hw_info *csid_hw_info;
2149 struct cam_isp_resource_node *res;
2150 struct cam_csid_hw_stop_args *csid_stop;
2151 uint32_t i;
2152
2153 if (!hw_priv || !stop_args ||
2154 (arg_size != sizeof(struct cam_csid_hw_stop_args))) {
2155 pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
2156 return -EINVAL;
2157 }
2158 csid_stop = (struct cam_csid_hw_stop_args *) stop_args;
2159 csid_hw_info = (struct cam_hw_info *)hw_priv;
2160 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2161
2162 mutex_lock(&csid_hw->hw_info->hw_mutex);
2163 /* Stop the resource first */
2164 for (i = 0; i < csid_stop->num_res; i++) {
2165 res = csid_stop->node_res[i];
2166 switch (res->res_type) {
2167 case CAM_ISP_RESOURCE_CID:
2168 if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
2169 rc = cam_ife_csid_tpg_stop(csid_hw, res);
2170 break;
2171 case CAM_ISP_RESOURCE_PIX_PATH:
2172 if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
2173 rc = cam_ife_csid_disable_ipp_path(csid_hw,
2174 res, csid_stop->stop_cmd);
2175 else
2176 rc = cam_ife_csid_disable_rdi_path(csid_hw,
2177 res, csid_stop->stop_cmd);
2178
2179 break;
2180 default:
2181 pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
2182 __LINE__, csid_hw->hw_intf->hw_idx,
2183 res->res_type);
2184 break;
2185 }
2186 }
2187
2188 /*wait for the path to halt */
2189 for (i = 0; i < csid_stop->num_res; i++) {
2190 res = csid_stop->node_res[i];
2191 if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
2192 csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
2193 rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
2194 }
2195
2196 mutex_unlock(&csid_hw->hw_info->hw_mutex);
2197 return rc;
2198
2199}
2200
2201static int cam_ife_csid_read(void *hw_priv,
2202 void *read_args, uint32_t arg_size)
2203{
2204 pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
2205
2206 return -EINVAL;
2207}
2208
2209static int cam_ife_csid_write(void *hw_priv,
2210 void *write_args, uint32_t arg_size)
2211{
2212 pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
2213 return -EINVAL;
2214}
2215
2216static int cam_ife_csid_process_cmd(void *hw_priv,
2217 uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
2218{
2219 int rc = 0;
2220 struct cam_ife_csid_hw *csid_hw;
2221 struct cam_hw_info *csid_hw_info;
2222
2223 if (!hw_priv || !cmd_args) {
2224 pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
2225 return -EINVAL;
2226 }
2227
2228 csid_hw_info = (struct cam_hw_info *)hw_priv;
2229 csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
2230
Jing Zhouff57d862017-03-21 00:54:25 -07002231 switch (cmd_type) {
2232 case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
2233 rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
2234 break;
2235 default:
2236 pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
2237 __LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
2238 rc = -EINVAL;
2239 break;
2240 }
Jing Zhouff57d862017-03-21 00:54:25 -07002241
2242 return rc;
2243
2244}
2245
2246irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
2247{
2248 struct cam_ife_csid_hw *csid_hw;
2249 struct cam_hw_soc_info *soc_info;
2250 struct cam_ife_csid_reg_offset *csid_reg;
2251 uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
2252 irq_status_rdi[4];
2253
2254 csid_hw = (struct cam_ife_csid_hw *)data;
2255
2256 CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
2257 csid_hw->hw_intf->hw_idx);
2258
2259 if (!data) {
2260 pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
2261 return IRQ_HANDLED;
2262 }
2263
2264 csid_reg = csid_hw->csid_info->csid_reg;
2265 soc_info = &csid_hw->hw_info->soc_info;
2266
2267 /* read */
2268 irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2269 csid_reg->cmn_reg->csid_top_irq_status_addr);
2270
2271 irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2272 csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
2273
2274 if (csid_reg->cmn_reg->no_pix)
2275 irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2276 csid_reg->ipp_reg->csid_ipp_irq_status_addr);
2277
2278
2279 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
2280 irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
2281 csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
2282
2283 /* clear */
2284 cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
2285 csid_reg->cmn_reg->csid_top_irq_clear_addr);
2286 cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
2287 csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
2288 if (csid_reg->cmn_reg->no_pix)
2289 cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
2290 csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
2291
2292 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
2293 cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
2294 csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
2295 }
2296 cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
2297 csid_reg->cmn_reg->csid_irq_cmd_addr);
2298
2299 CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
2300 irq_status_rx);
2301 CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
2302 irq_status_ipp);
2303
2304 if (irq_status_top) {
2305 CDBG("%s:%d: CSID global reset complete......Exit\n",
2306 __func__, __LINE__);
2307 complete(&csid_hw->csid_top_complete);
2308 return IRQ_HANDLED;
2309 }
2310
2311
2312 if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
2313 CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
2314 complete(&csid_hw->csid_csi2_complete);
2315 }
2316
2317 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
2318 pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
2319 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2320 }
2321 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
2322 pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
2323 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2324 }
2325 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
2326 pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
2327 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2328 }
2329 if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
2330 pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
2331 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2332 }
2333 if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
2334 pr_err_ratelimited("%s:%d:CSID:%d TG OVER FLOW\n",
2335 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2336 }
2337 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
2338 pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
2339 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2340 }
2341 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
2342 pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
2343 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2344 }
2345 if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
2346 pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
2347 __func__, __LINE__, csid_hw->hw_intf->hw_idx);
2348 }
2349
2350 /*read the IPP errors */
2351 if (csid_reg->cmn_reg->no_pix) {
2352 /* IPP reset done bit */
2353 if (irq_status_ipp &
2354 BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
2355 CDBG("%s%d: CSID IPP reset complete\n",
2356 __func__, __LINE__);
2357 complete(&csid_hw->csid_ipp_complete);
2358 }
2359 if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
2360 CDBG("%s: CSID IPP SOF received\n", __func__);
2361 if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
2362 CDBG("%s: CSID IPP SOL received\n", __func__);
2363 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
2364 CDBG("%s: CSID IPP EOL received\n", __func__);
2365 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
2366 CDBG("%s: CSID IPP EOF received\n", __func__);
2367
2368 if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
2369 complete(&csid_hw->csid_ipp_complete);
2370
2371 if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
2372 pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
2373 __func__, __LINE__,
2374 csid_hw->hw_intf->hw_idx);
2375 /*Stop IPP path immediately */
2376 cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
2377 soc_info->reg_map[0].mem_base +
2378 csid_reg->ipp_reg->csid_ipp_ctrl_addr);
2379 }
2380 }
2381
2382 for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
2383 if (irq_status_rdi[i] &
2384 BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
2385 CDBG("%s:%d: CSID rdi%d reset complete\n",
2386 __func__, __LINE__, i);
2387 complete(&csid_hw->csid_rdin_complete[i]);
2388 }
2389
2390 if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
2391 complete(&csid_hw->csid_rdin_complete[i]);
2392
2393 if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
2394 pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
2395 __func__, __LINE__,
2396 csid_hw->hw_intf->hw_idx);
2397 /*Stop RDI path immediately */
2398 cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
2399 soc_info->reg_map[0].mem_base +
2400 csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
2401 }
2402 }
2403
2404 CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
2405 return IRQ_HANDLED;
2406}
2407
/*
 * cam_ife_csid_hw_probe_init() - one-time probe setup of a CSID hw block
 *
 * Wires up locks/completions, registers the irq handler via the soc
 * init, fills in the hw_ops function table and allocates the private
 * data for every CID, IPP and RDI resource node.
 *
 * @csid_hw_intf: hw interface whose hw_priv holds the cam_hw_info; its
 *                core_info is the cam_ife_csid_hw to initialize
 *                (assumed non-NULL and pre-populated — not checked here)
 * @csid_idx:     index of this CSID, must be < CAM_IFE_CSID_HW_RES_MAX
 *
 * Return: 0 on success, -EINVAL on bad index, -ENOMEM or soc-init error
 * otherwise. On failure all res_priv allocations made so far are freed.
 */
int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
	uint32_t csid_idx)
{
	int rc = -EINVAL;
	uint32_t i;
	struct cam_ife_csid_path_cfg         *path_data;
	struct cam_ife_csid_cid_data         *cid_data;
	struct cam_hw_info                   *csid_hw_info;
	struct cam_ife_csid_hw               *ife_csid_hw = NULL;

	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
		pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
			csid_idx);
		return rc;
	}

	csid_hw_info = (struct cam_hw_info  *) csid_hw_intf->hw_priv;
	ife_csid_hw  = (struct cam_ife_csid_hw  *) csid_hw_info->core_info;

	/* cross-link the interface and hw info into the core struct */
	ife_csid_hw->hw_intf = csid_hw_intf;
	ife_csid_hw->hw_info = csid_hw_info;

	CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
		ife_csid_hw->hw_intf->hw_type, csid_idx);

	/* hw starts powered down; init all sync primitives before irq */
	ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
	mutex_init(&ife_csid_hw->hw_info->hw_mutex);
	spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
	init_completion(&ife_csid_hw->hw_info->hw_complete);

	/* completions signalled from cam_ife_csid_irq() */
	init_completion(&ife_csid_hw->csid_top_complete);
	init_completion(&ife_csid_hw->csid_csi2_complete);
	init_completion(&ife_csid_hw->csid_ipp_complete);
	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
		init_completion(&ife_csid_hw->csid_rdin_complete[i]);

	/* map resources, clocks and register the irq handler */
	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
			cam_ife_csid_irq, ife_csid_hw);
	if (rc < 0) {
		pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
			csid_idx);
		goto err;
	}

	/* publish the hw_ops table used by the ISP hw manager */
	ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
	ife_csid_hw->hw_intf->hw_ops.init        = cam_ife_csid_init_hw;
	ife_csid_hw->hw_intf->hw_ops.deinit      = cam_ife_csid_deinit_hw;
	ife_csid_hw->hw_intf->hw_ops.reset       = cam_ife_csid_reset;
	ife_csid_hw->hw_intf->hw_ops.reserve     = cam_ife_csid_reserve;
	ife_csid_hw->hw_intf->hw_ops.release     = cam_ife_csid_release;
	ife_csid_hw->hw_intf->hw_ops.start       = cam_ife_csid_start;
	ife_csid_hw->hw_intf->hw_ops.stop        = cam_ife_csid_stop;
	ife_csid_hw->hw_intf->hw_ops.read        = cam_ife_csid_read;
	ife_csid_hw->hw_intf->hw_ops.write       = cam_ife_csid_write;
	ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;

	/*Initialize the CID resoure */
	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
		ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
		ife_csid_hw->cid_res[i].res_id = i;
		ife_csid_hw->cid_res[i].res_state  =
					CAM_ISP_RESOURCE_STATE_AVAILABLE;
		ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;

		cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
					GFP_KERNEL);
		if (!cid_data) {
			rc = -ENOMEM;
			goto err;
		}
		ife_csid_hw->cid_res[i].res_priv = cid_data;
	}

	/* Initialize the IPP resources (only if this hw has a pix path) */
	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix) {
		ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
		ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
		ife_csid_hw->ipp_res.res_state =
			CAM_ISP_RESOURCE_STATE_AVAILABLE;
		ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
					GFP_KERNEL);
		if (!path_data) {
			rc = -ENOMEM;
			goto err;
		}
		ife_csid_hw->ipp_res.res_priv = path_data;
	}

	/* Initialize the RDI resource */
	for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
				i++) {
		/* res type is from RDI 0 to RDI3 */
		ife_csid_hw->rdi_res[i].res_type =
			CAM_ISP_RESOURCE_PIX_PATH;
		ife_csid_hw->rdi_res[i].res_id = i;
		ife_csid_hw->rdi_res[i].res_state =
			CAM_ISP_RESOURCE_STATE_AVAILABLE;
		ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;

		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
					GFP_KERNEL);
		if (!path_data) {
			rc = -ENOMEM;
			goto err;
		}
		ife_csid_hw->rdi_res[i].res_priv = path_data;
	}

	return 0;
err:
	/*
	 * Free whatever res_priv was allocated before the failure;
	 * kfree(NULL) is a no-op for the nodes not yet reached.
	 * NOTE(review): freed pointers are not cleared here — a later
	 * cam_ife_csid_hw_deinit() on this hw would double-free; the
	 * caller is presumed to abandon the hw on probe failure.
	 */
	if (rc) {
		kfree(ife_csid_hw->ipp_res.res_priv);
		for (i = 0; i <
			ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis; i++)
			kfree(ife_csid_hw->rdi_res[i].res_priv);

		for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
			kfree(ife_csid_hw->cid_res[i].res_priv);

	}

	return rc;
}
2534
2535
2536int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
2537{
2538 int rc = -EINVAL;
2539 uint32_t i;
2540
2541 if (!ife_csid_hw) {
2542 pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
2543 return rc;
2544 }
2545
2546 /* release the privdate data memory from resources */
2547 kfree(ife_csid_hw->ipp_res.res_priv);
2548 for (i = 0; i <
2549 ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
2550 i++) {
2551 kfree(ife_csid_hw->rdi_res[i].res_priv);
2552 }
2553 for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
2554 kfree(ife_csid_hw->cid_res[i].res_priv);
2555
2556
2557 return 0;
2558}
2559
2560