/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <media/v4l2-subdev.h>
#include <asm/div64.h>
#include "msm_isp_util.h"
#include "msm_isp_stats_util.h"
#include "msm_isp_axi_util.h"
#include "msm_isp48.h"
#include "trace/events/msm_cam.h"

#define HANDLE_TO_IDX(handle) (handle & 0xFF)
#define ISP_SOF_DEBUG_COUNT 0
#define OTHER_VFE(vfe_id) (vfe_id == ISP_VFE0 ? ISP_VFE1 : ISP_VFE0)

#ifdef CONFIG_MSM_AVTIMER
static struct avtimer_fptr_t avtimer_func;
#endif
static void msm_isp_reload_ping_pong_offset(
	struct msm_vfe_axi_stream *stream_info);

static void __msm_isp_axi_stream_update(
	struct msm_vfe_axi_stream *stream_info,
	struct msm_isp_timestamp *ts);

static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
	struct timeval *time_stamp, uint32_t frame_id);
static void msm_isp_free_pending_buffer(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info,
	struct msm_isp_timestamp *ts);
static int msm_isp_update_stream_bandwidth(
	struct msm_vfe_axi_stream *stream_info, int enable);

#define DUAL_VFE_AND_VFE1(s, v) ((s->stream_src < RDI_INTF_0) && \
			v->is_split && vfe_dev->pdev->id == ISP_VFE1)

#define RDI_OR_NOT_DUAL_VFE(v, s) (!v->is_split || \
	((s->stream_src >= RDI_INTF_0) && \
	(stream_info->stream_src <= RDI_INTF_2)))

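/**
 * msm_isp_axi_create_stream() - Register a vfe device with an axi stream
 * @vfe_dev: The vfe device registering for the stream
 * @axi_data: The axi shared data of the vfe device
 * @stream_cfg_cmd: User space request parameters for the stream
 * @stream_info: The stream being created
 *
 * On the first vfe the stream parameters from user space are recorded;
 * on a second (dual vfe) add they are validated against the recorded
 * ones. A new axi stream handle is allocated and the stream moves to
 * INACTIVE once all required vfe devices have been added.
 *
 * Returns 0 on success else error code.
 */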
static int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_shared_data *axi_data,
	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd,
	struct msm_vfe_axi_stream *stream_info)
{
	uint32_t i = 0;
	int rc = 0;

	if (stream_info->state != AVAILABLE) {
		pr_err("%s:%d invalid state %d expected %d\n",
			__func__, __LINE__, stream_info->state,
			AVAILABLE);
		return -EINVAL;
	}

	if (stream_info->num_isp == 0) {
		stream_info->session_id = stream_cfg_cmd->session_id;
		stream_info->stream_id = stream_cfg_cmd->stream_id;
		stream_info->buf_divert = stream_cfg_cmd->buf_divert;
		stream_info->stream_src = stream_cfg_cmd->stream_src;
		stream_info->controllable_output =
			stream_cfg_cmd->controllable_output;
		stream_info->activated_framedrop_period =
			MSM_VFE_STREAM_STOP_PERIOD;
		if (stream_cfg_cmd->controllable_output)
			stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
		INIT_LIST_HEAD(&stream_info->request_q);
	} else {
		/* check if the stream has been added for the vfe-device */
		if (stream_info->vfe_mask & (1 << vfe_dev->pdev->id)) {
			pr_err("%s: stream %pK/%x is already added for vfe dev %d vfe_mask %x\n",
				__func__, stream_info, stream_info->stream_id,
				vfe_dev->pdev->id, stream_info->vfe_mask);
			return -EINVAL;
		}
		if (stream_info->session_id != stream_cfg_cmd->session_id) {
			pr_err("%s: dual stream session id mismatch %d/%d\n",
				__func__, stream_info->session_id,
				stream_cfg_cmd->session_id);
			rc = -EINVAL;
		}
		if (stream_info->stream_id != stream_cfg_cmd->stream_id) {
			pr_err("%s: dual stream stream id mismatch %d/%d\n",
				__func__, stream_info->stream_id,
				stream_cfg_cmd->stream_id);
			rc = -EINVAL;
		}
		if (stream_info->controllable_output !=
			stream_cfg_cmd->controllable_output) {
			pr_err("%s: dual stream controllable_op mismatch %d/%d\n",
				__func__, stream_info->controllable_output,
				stream_cfg_cmd->controllable_output);
			rc = -EINVAL;
		}
		if (stream_info->buf_divert != stream_cfg_cmd->buf_divert) {
			pr_err("%s: dual stream buf_divert mismatch %d/%d\n",
				__func__, stream_info->buf_divert,
				stream_cfg_cmd->buf_divert);
			rc = -EINVAL;
		}
		if (rc)
			return rc;
	}
	stream_info->vfe_dev[stream_info->num_isp] = vfe_dev;
	stream_info->num_isp++;

	if ((axi_data->stream_handle_cnt << 8) == 0)
		axi_data->stream_handle_cnt++;

	stream_cfg_cmd->axi_stream_handle =
		(++axi_data->stream_handle_cnt) << 8 | stream_info->stream_src;

	ISP_DBG("%s: vfe %d handle %x\n", __func__, vfe_dev->pdev->id,
		stream_cfg_cmd->axi_stream_handle);

	stream_info->stream_handle[stream_info->num_isp - 1] =
		stream_cfg_cmd->axi_stream_handle;
	stream_info->vfe_mask |= (1 << vfe_dev->pdev->id);

	if (!vfe_dev->is_split || stream_cfg_cmd->stream_src >= RDI_INTF_0 ||
		stream_info->num_isp == MAX_VFE) {
		stream_info->state = INACTIVE;

		for (i = 0; i < MSM_ISP_COMP_IRQ_MAX; i++)
			stream_info->composite_irq[i] = 0;
	}
	return 0;
}

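/**
 * msm_isp_axi_destroy_stream() - Remove a vfe device from an axi stream
 * @vfe_dev: The vfe device being removed
 * @stream_info: The stream from which the vfe device is removed
 *
 * The per-vfe entries of the stream are compacted so that the removed
 * index becomes the last one and is then cleared. When no vfe device
 * references the stream any more, the bufq handles are released and
 * the stream returns to the AVAILABLE state.
 *
 * Returns void.
 */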
static void msm_isp_axi_destroy_stream(
	struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
{
	int k;
	int j;
	int i;
	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);

	/*
	 * For the index being removed, shift everything to its right by 1
	 * so that the index being removed becomes the last index
	 */
	for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) {
		stream_info->vfe_dev[i] = stream_info->vfe_dev[k];
		stream_info->stream_handle[i] = stream_info->stream_handle[k];
		stream_info->bandwidth[i] = stream_info->bandwidth[k];
		stream_info->max_width[i] = stream_info->max_width[k];
		stream_info->comp_mask_index[i] =
			stream_info->comp_mask_index[k];
		for (j = 0; j < stream_info->num_planes; j++) {
			stream_info->plane_cfg[i][j] =
				stream_info->plane_cfg[k][j];
			stream_info->wm[i][j] = stream_info->wm[k][j];
		}
	}

	stream_info->num_isp--;
	stream_info->vfe_dev[stream_info->num_isp] = NULL;
	stream_info->stream_handle[stream_info->num_isp] = 0;
	stream_info->bandwidth[stream_info->num_isp] = 0;
	stream_info->max_width[stream_info->num_isp] = 0;
	stream_info->comp_mask_index[stream_info->num_isp] = -1;
	stream_info->vfe_mask &= ~(1 << vfe_dev->pdev->id);
	for (j = 0; j < stream_info->num_planes; j++) {
		stream_info->wm[stream_info->num_isp][j] = -1;
		memset(&stream_info->plane_cfg[stream_info->num_isp][j],
			0, sizeof(
			stream_info->plane_cfg[stream_info->num_isp][j]));
	}

	if (stream_info->num_isp == 0) {
		/* release the bufq */
		for (k = 0; k < VFE_BUF_QUEUE_MAX; k++)
			stream_info->bufq_handle[k] = 0;
		stream_info->vfe_mask = 0;
		stream_info->state = AVAILABLE;
		memset(&stream_info->request_queue_cmd,
			0, sizeof(stream_info->request_queue_cmd));
	}
}

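/**
 * msm_isp_validate_axi_request() - Validate a stream request from user space
 * @vfe_dev: The vfe device the request is made on
 * @stream_info: The stream being configured
 * @stream_cfg_cmd: User space request parameters
 *
 * Derives the number of planes and format factor from the output format,
 * checks that enough write masters and composite masks are free and that
 * the framedrop parameters are in range, then stores the per-plane
 * configuration for this vfe device.
 *
 * Returns 0 on success else error code.
 */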
static int msm_isp_validate_axi_request(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info,
	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
	int rc = -1, i;
	int vfe_idx;
	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;

	switch (stream_cfg_cmd->output_format) {
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
	case V4L2_PIX_FMT_P16BGGR12:
	case V4L2_PIX_FMT_P16GBRG12:
	case V4L2_PIX_FMT_P16GRBG12:
	case V4L2_PIX_FMT_P16RGGB12:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
	case V4L2_PIX_FMT_META10:
	case V4L2_PIX_FMT_GREY:
		stream_info->num_planes = 1;
		stream_info->format_factor = ISP_Q2;
		break;
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
		stream_info->num_planes = 2;
		stream_info->format_factor = 1.5 * ISP_Q2;
		break;
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		stream_info->num_planes = 2;
		stream_info->format_factor = 2 * ISP_Q2;
		break;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		stream_info->num_planes = 2;
		stream_info->format_factor = 3 * ISP_Q2;
		break;
	/* TODO: Add more image formats */
	default:
		msm_isp_print_fourcc_error(__func__,
			stream_cfg_cmd->output_format);
		return rc;
	}

	if (axi_data->hw_info->num_wm - axi_data->num_used_wm <
		stream_info->num_planes) {
		pr_err("%s: No free write masters\n", __func__);
		return rc;
	}

	if ((stream_info->num_planes > 1) &&
		(axi_data->hw_info->num_comp_mask -
		axi_data->num_used_composite_mask < 1)) {
		pr_err("%s: No free composite mask\n", __func__);
		return rc;
	}

	if (stream_cfg_cmd->init_frame_drop >= MAX_INIT_FRAME_DROP) {
		pr_err("%s: Invalid init frame drop\n", __func__);
		return rc;
	}

	if (stream_cfg_cmd->frame_skip_pattern >= MAX_SKIP) {
		pr_err("%s: Invalid skip pattern\n", __func__);
		return rc;
	}

	vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);

	for (i = 0; i < stream_info->num_planes; i++) {
		stream_info->plane_cfg[vfe_idx][i] =
			stream_cfg_cmd->plane_cfg[i];
		stream_info->max_width[vfe_idx] =
			max(stream_info->max_width[vfe_idx],
				stream_cfg_cmd->plane_cfg[i].output_width);
	}

	stream_info->output_format = stream_cfg_cmd->output_format;
	stream_info->runtime_output_format = stream_info->output_format;
	stream_info->stream_src = stream_cfg_cmd->stream_src;
	stream_info->frame_based = stream_cfg_cmd->frame_base;
	return 0;
}

static uint32_t msm_isp_axi_get_plane_size(
	struct msm_vfe_axi_stream *stream_info, int vfe_idx, int plane_idx)
{
	uint32_t size = 0;
	struct msm_vfe_axi_plane_cfg *plane_cfg =
		stream_info->plane_cfg[vfe_idx];

	switch (stream_info->output_format) {
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
	case V4L2_PIX_FMT_GREY:
		size = plane_cfg[plane_idx].output_height *
			plane_cfg[plane_idx].output_width;
		break;
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_META10:
		/* TODO: fix me */
		size = plane_cfg[plane_idx].output_height *
			plane_cfg[plane_idx].output_width;
		break;
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		/* TODO: fix me */
		size = plane_cfg[plane_idx].output_height *
			plane_cfg[plane_idx].output_width;
		break;
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
	case V4L2_PIX_FMT_P16BGGR12:
	case V4L2_PIX_FMT_P16GBRG12:
	case V4L2_PIX_FMT_P16GRBG12:
	case V4L2_PIX_FMT_P16RGGB12:
		size = plane_cfg[plane_idx].output_height *
			plane_cfg[plane_idx].output_width;
		break;
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
			size = plane_cfg[plane_idx].output_height *
				plane_cfg[plane_idx].output_width;
		else
			size = plane_cfg[plane_idx].output_height *
				plane_cfg[plane_idx].output_width;
		break;
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
		if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
			size = plane_cfg[plane_idx].output_height *
				plane_cfg[plane_idx].output_width;
		else
			size = plane_cfg[plane_idx].output_height *
				plane_cfg[plane_idx].output_width;
		break;
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		size = plane_cfg[plane_idx].output_height *
			plane_cfg[plane_idx].output_width;
		break;
	/* TODO: Add more image formats */
	default:
		msm_isp_print_fourcc_error(__func__,
			stream_info->output_format);
		break;
	}
	return size;
}

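/**
 * msm_isp_axi_reserve_wm() - Reserve write masters for a stream
 * @vfe_dev: The vfe device on which the write masters are reserved
 * @stream_info: The stream the write masters are reserved for
 *
 * For every plane of the stream the first free write master of the vfe
 * is claimed and its image size recorded. For RDI streams the bus error
 * ignore mask is also set for the reserved write master.
 *
 * Returns void.
 */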
static void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
	int i, j;
	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);

	for (i = 0; i < stream_info->num_planes; i++) {
		for (j = 0; j < axi_data->hw_info->num_wm; j++) {
			if (!axi_data->free_wm[j]) {
				axi_data->free_wm[j] =
					stream_info->stream_handle[vfe_idx];
				axi_data->wm_image_size[j] =
					msm_isp_axi_get_plane_size(
						stream_info, vfe_idx, i);
				axi_data->num_used_wm++;
				break;
			}
		}
		ISP_DBG("%s vfe %d stream_handle %x wm %d\n", __func__,
			vfe_dev->pdev->id,
			stream_info->stream_handle[vfe_idx], j);
		stream_info->wm[vfe_idx][i] = j;
		/* setup var to ignore bus error from RDI wm */
		if (stream_info->stream_src >= RDI_INTF_0) {
			if (vfe_dev->hw_info->vfe_ops.core_ops.
				set_bus_err_ign_mask)
				vfe_dev->hw_info->vfe_ops.core_ops.
					set_bus_err_ign_mask(vfe_dev, j, 1);
		}
	}
}

void msm_isp_axi_free_wm(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
	int i;
	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);

	for (i = 0; i < stream_info->num_planes; i++) {
		axi_data->free_wm[stream_info->wm[vfe_idx][i]] = 0;
		axi_data->num_used_wm--;
		if (stream_info->stream_src >= RDI_INTF_0) {
			if (vfe_dev->hw_info->vfe_ops.core_ops.
				set_bus_err_ign_mask)
				vfe_dev->hw_info->vfe_ops.core_ops.
					set_bus_err_ign_mask(vfe_dev,
						stream_info->wm[vfe_idx][i], 0);
		}
	}
	if (stream_info->stream_src <= IDEAL_RAW)
		axi_data->num_pix_stream++;
	else if (stream_info->stream_src < VFE_AXI_SRC_MAX)
		axi_data->num_rdi_stream++;
}

static void msm_isp_axi_reserve_comp_mask(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	int i;
	uint8_t comp_mask = 0;
	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);

	for (i = 0; i < stream_info->num_planes; i++)
		comp_mask |= 1 << stream_info->wm[vfe_idx][i];

	for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
		if (!axi_data->composite_info[i].stream_handle) {
			axi_data->composite_info[i].stream_handle =
				stream_info->stream_handle[vfe_idx];
			axi_data->composite_info[i].
				stream_composite_mask = comp_mask;
			axi_data->num_used_composite_mask++;
			break;
		}
	}
	stream_info->comp_mask_index[vfe_idx] = i;
}

static void msm_isp_axi_free_comp_mask(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);

	axi_data->composite_info[stream_info->comp_mask_index[vfe_idx]].
		stream_composite_mask = 0;
	axi_data->composite_info[stream_info->comp_mask_index[vfe_idx]].
		stream_handle = 0;
	axi_data->num_used_composite_mask--;
}

/**
 * msm_isp_cfg_framedrop_reg() - Program the period and pattern
 * @stream_info: The stream for which programming is done
 *
 * This function calculates the period and pattern to be configured
 * for the stream based on the current frame id of the stream's input
 * source and the initial framedrops.
 *
 * Returns void.
 */
static void msm_isp_cfg_framedrop_reg(
	struct msm_vfe_axi_stream *stream_info)
{
	struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
	uint32_t runtime_init_frame_drop;
	uint32_t framedrop_pattern = 0;
	uint32_t framedrop_period = MSM_VFE_STREAM_STOP_PERIOD;
	enum msm_vfe_input_src frame_src = SRC_TO_INTF(stream_info->stream_src);
	int i;

	if (vfe_dev == NULL) {
		pr_err("%s %d returning vfe_dev is NULL\n",
			__func__, __LINE__);
		return;
	}

	if (vfe_dev->axi_data.src_info[frame_src].frame_id >=
		stream_info->init_frame_drop)
		runtime_init_frame_drop = 0;
	else
		runtime_init_frame_drop = stream_info->init_frame_drop -
			vfe_dev->axi_data.src_info[frame_src].frame_id;

	if (!runtime_init_frame_drop)
		framedrop_period = stream_info->current_framedrop_period;

	if (framedrop_period != MSM_VFE_STREAM_STOP_PERIOD)
		framedrop_pattern = 0x1;

	if (WARN_ON(framedrop_period == 0))
		pr_err("%s framedrop_period is 0\n", __func__);

	for (i = 0; i < stream_info->num_isp; i++) {
		vfe_dev = stream_info->vfe_dev[i];
		vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
			vfe_dev, stream_info, framedrop_pattern,
			framedrop_period);
	}

	ISP_DBG("%s: stream %x src %x framedrop pattern %x period %u\n",
		__func__,
		stream_info->stream_handle[0], stream_info->stream_src,
		framedrop_pattern, framedrop_period);

	stream_info->requested_framedrop_period = framedrop_period;
}

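/**
 * msm_isp_composite_irq() - Composite an irq across the vfe devices of a stream
 * @vfe_dev: The vfe device that received the irq
 * @stream_info: The stream the irq belongs to
 * @irq: The composite irq type being processed
 *
 * Returns -EINVAL if the same vfe signals twice before its peer (out of
 * sync), 1 if the irq from the other vfe is still pending, and 0 once
 * all vfe devices of the stream have signalled the irq.
 */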
static int msm_isp_composite_irq(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info,
	enum msm_isp_comp_irq_types irq)
{
	/* interrupt recv on same vfe w/o recv on other vfe */
	if (stream_info->composite_irq[irq] & (1 << vfe_dev->pdev->id)) {
		msm_isp_dump_ping_pong_mismatch(vfe_dev);
		pr_err("%s: irq %d out of sync for dual vfe on vfe %d\n",
			__func__, irq, vfe_dev->pdev->id);
		return -EINVAL;
	}

	stream_info->composite_irq[irq] |= (1 << vfe_dev->pdev->id);
	if (stream_info->composite_irq[irq] != stream_info->vfe_mask)
		return 1;

	stream_info->composite_irq[irq] = 0;

	return 0;
}

/**
 * msm_isp_update_framedrop_reg() - Update frame period pattern on h/w
 * @stream_info: Stream for which update is to be performed
 * @drop_reconfig: The drop reconfig flag read from the isp page at EPOCH
 *
 * If the period and pattern need to be updated for a stream then they are
 * updated here. Updates happen if initial frame drop reaches 0 or burst
 * streams have been provided a new skip pattern from user space.
 *
 * Returns void
 */
static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info,
	uint32_t drop_reconfig)
{
	if (stream_info->stream_type == BURST_STREAM) {
		if (stream_info->runtime_num_burst_capture == 0 ||
			(stream_info->runtime_num_burst_capture == 1 &&
			stream_info->activated_framedrop_period == 1))
			stream_info->current_framedrop_period =
				MSM_VFE_STREAM_STOP_PERIOD;
	}

	if (stream_info->undelivered_request_cnt > 0)
		stream_info->current_framedrop_period =
			MSM_VFE_STREAM_STOP_PERIOD;
	/*
	 * re-configure the period pattern, only if it's not already
	 * set to what we want
	 */
	if (stream_info->current_framedrop_period !=
		stream_info->requested_framedrop_period) {
		msm_isp_cfg_framedrop_reg(stream_info);
	}
}

void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
	enum msm_vfe_input_src frame_src,
	enum msm_isp_comp_irq_types irq,
	struct msm_isp_timestamp *ts)
{
	int i;
	struct msm_vfe_axi_stream *stream_info;
	unsigned long flags;
	int ret;

	for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
		stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
		if (SRC_TO_INTF(stream_info->stream_src) !=
			frame_src) {
			continue;
		}
		if (stream_info->state == AVAILABLE ||
			stream_info->state == INACTIVE)
			continue;

		spin_lock_irqsave(&stream_info->lock, flags);

		ret = msm_isp_composite_irq(vfe_dev, stream_info, irq);
		if (ret) {
			spin_unlock_irqrestore(&stream_info->lock, flags);
			if (ret < 0) {
				msm_isp_halt_send_error(vfe_dev,
					ISP_EVENT_PING_PONG_MISMATCH);
				return;
			}
			continue;
		}

		switch (irq) {
		case MSM_ISP_COMP_IRQ_REG_UPD:
			stream_info->activated_framedrop_period =
				stream_info->requested_framedrop_period;
			/*
			 * Free pending buffers that are backed up due to a
			 * delay in RUP from userspace, to avoid a page fault.
			 */
			msm_isp_free_pending_buffer(vfe_dev, stream_info, ts);
			__msm_isp_axi_stream_update(stream_info, ts);
			break;
		case MSM_ISP_COMP_IRQ_EPOCH:
			if (stream_info->state == ACTIVE) {
				struct vfe_device *temp = NULL;
				struct msm_vfe_common_dev_data *c_data;
				uint32_t drop_reconfig =
					vfe_dev->isp_page->drop_reconfig;

				if (stream_info->num_isp > 1 &&
					vfe_dev->pdev->id == ISP_VFE0) {
					c_data = vfe_dev->common_data;
					temp = c_data->dual_vfe_res->vfe_dev[
						ISP_VFE1];
					drop_reconfig =
						temp->isp_page->drop_reconfig;
				}
				msm_isp_update_framedrop_reg(stream_info,
					drop_reconfig);
			}
			break;
		default:
			WARN(1, "Invalid irq %d\n", irq);
		}
		spin_unlock_irqrestore(&stream_info->lock, flags);
	}
}

/**
 * msm_isp_reset_framedrop() - Compute the framedrop period pattern
 * @vfe_dev: Device for which the period and pattern is computed
 * @stream_info: The stream for which the period and pattern is generated
 *
 * This function is called when a stream starts or is reset. Its main
 * purpose is to setup the runtime parameters of framedrop required
 * for the stream.
 *
 * Returns void
 */
void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	uint32_t framedrop_period = 0;

	stream_info->runtime_num_burst_capture = stream_info->num_burst_capture;

	/*
	 * only reset non-controllable output streams, since the
	 * controllable stream framedrop period will be controlled
	 * by the request frame api
	 */
	if (!stream_info->controllable_output) {
		framedrop_period =
			msm_isp_get_framedrop_period(
				stream_info->frame_skip_pattern);
		if (stream_info->frame_skip_pattern == SKIP_ALL)
			stream_info->current_framedrop_period =
				MSM_VFE_STREAM_STOP_PERIOD;
		else
			stream_info->current_framedrop_period =
				framedrop_period;
	}

	msm_isp_cfg_framedrop_reg(stream_info);
	ISP_DBG("%s: init frame drop: %d\n", __func__,
		stream_info->init_frame_drop);
	ISP_DBG("%s: num_burst_capture: %d\n", __func__,
		stream_info->runtime_num_burst_capture);
}

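/**
 * msm_isp_check_for_output_error() - Check controllable outputs at SOF
 * @vfe_dev: The vfe device on which the SOF was received
 * @ts: Timestamp of the SOF
 * @sof_info: SOF event payload filled with the error information
 *
 * Reports missing register updates for controllable PIX streams (dropping
 * the pending frame), flags streams that are resuming, and reports
 * per-stream buffer-get failures through @sof_info.
 *
 * Returns void.
 */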
void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
	struct msm_isp_timestamp *ts, struct msm_isp_sof_info *sof_info)
{
	struct msm_vfe_axi_stream *stream_info;
	struct msm_vfe_axi_shared_data *axi_data;
	int i;
	uint32_t stream_idx;

	if (!vfe_dev || !sof_info) {
		pr_err("%s %d failed: vfe_dev %pK sof_info %pK\n", __func__,
			__LINE__, vfe_dev, sof_info);
		return;
	}
	sof_info->regs_not_updated = 0;
	sof_info->reg_update_fail_mask = 0;
	sof_info->stream_get_buf_fail_mask = 0;

	axi_data = &vfe_dev->axi_data;

	for (i = 0; i < RDI_INTF_0; i++) {
		stream_info = msm_isp_get_stream_common_data(vfe_dev,
			i);
		stream_idx = HANDLE_TO_IDX(stream_info->stream_handle[0]);

		/*
		 * Process drop only if controllable ACTIVE PIX stream &&
		 * reg_not_updated
		 * OR stream is in RESUMING state.
		 * Other cases there is no drop to report, so continue.
		 */
		if (!((stream_info->state == ACTIVE &&
			stream_info->controllable_output &&
			(SRC_TO_INTF(stream_info->stream_src) ==
			VFE_PIX_0)) ||
			stream_info->state == RESUMING))
			continue;

		if (stream_info->controllable_output &&
			!vfe_dev->reg_updated) {
			if (stream_info->undelivered_request_cnt) {
				/* report that registers are not updated
				 * and return empty buffer for controllable
				 * outputs
				 */
				sof_info->regs_not_updated =
					!vfe_dev->reg_updated;
				pr_err("Drop frame no reg update\n");
				if (msm_isp_drop_frame(vfe_dev, stream_info, ts,
					sof_info)) {
					pr_err("drop frame failed\n");
				}
			}
		}

		if (stream_info->state == RESUMING &&
			!stream_info->controllable_output) {
			ISP_DBG("%s: axi_updating_mask strm_id %x frm_id %d\n",
				__func__, stream_idx, vfe_dev->axi_data.
				src_info[SRC_TO_INTF(stream_info->stream_src)]
				.frame_id);
			sof_info->axi_updating_mask |=
				1 << stream_idx;
		}
	}

	vfe_dev->reg_updated = 0;

	/* report frame drop per stream */
	if (vfe_dev->error_info.framedrop_flag) {
		for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
			if (vfe_dev->error_info.stream_framedrop_count[i]) {
				ISP_DBG("%s: get buf failed i %d\n", __func__,
					i);
				sof_info->stream_get_buf_fail_mask |= (1 << i);
				vfe_dev->error_info.
					stream_framedrop_count[i] = 0;
			}
		}
		vfe_dev->error_info.framedrop_flag = 0;
	}
}

static void msm_isp_sync_dual_cam_frame_id(
	struct vfe_device *vfe_dev,
	struct master_slave_resource_info *ms_res,
	enum msm_vfe_input_src frame_src,
	struct msm_isp_timestamp *ts)
{
	struct msm_vfe_src_info *src_info =
		&vfe_dev->axi_data.src_info[frame_src];
	int i;
	uint32_t frame_id = src_info->frame_id;
	uint32_t master_time = 0, current_time;

	if (src_info->dual_hw_ms_info.sync_state ==
		ms_res->dual_sync_mode) {
		(frame_src == VFE_PIX_0) ? src_info->frame_id +=
			vfe_dev->axi_data.src_info[frame_src].
			sof_counter_step :
			src_info->frame_id++;
		return;
	}

	/* find highest frame id */
	for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
		if (ms_res->src_info[i] == NULL)
			continue;
		if (src_info == ms_res->src_info[i] ||
			ms_res->src_info[i]->active == 0)
			continue;
		if (frame_id >= ms_res->src_info[i]->frame_id)
			continue;
		frame_id = ms_res->src_info[i]->frame_id;
		master_time = ms_res->src_info[i]->
			dual_hw_ms_info.sof_info.mono_timestamp_ms;
	}
	/* copy highest frame id to the intf based on sof delta */
	current_time = ts->buf_time.tv_sec * 1000 +
		ts->buf_time.tv_usec / 1000;

	if (current_time > master_time &&
		(current_time - master_time) > ms_res->sof_delta_threshold) {
		if (frame_src == VFE_PIX_0)
			frame_id += vfe_dev->axi_data.src_info[frame_src].
				sof_counter_step;
		else
			frame_id += 1;
	} else {
		for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
			if (ms_res->src_info[i] == NULL)
				continue;
			if (src_info == ms_res->src_info[i] ||
				((1 << ms_res->src_info[i]->
					dual_hw_ms_info.index) &
					ms_res->active_src_mask) == 0)
				continue;
			if (ms_res->src_info[i]->frame_id == frame_id)
				ms_res->src_sof_mask |= (1 <<
				ms_res->src_info[i]->dual_hw_ms_info.index);
		}
	}
	/* the number of frames that are dropped */
	vfe_dev->isp_page->dual_cam_drop =
		frame_id - (src_info->frame_id + 1);
	ms_res->active_src_mask |= (1 << src_info->dual_hw_ms_info.index);
	src_info->frame_id = frame_id;
	src_info->dual_hw_ms_info.sync_state = MSM_ISP_DUAL_CAM_SYNC;
}

void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
	enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
{
	struct msm_vfe_src_info *src_info = NULL;
	struct msm_vfe_sof_info *sof_info = NULL;
	enum msm_vfe_dual_hw_type dual_hw_type;
	enum msm_vfe_dual_hw_ms_type ms_type;
	unsigned long flags;
	int i;
	struct master_slave_resource_info *ms_res =
		&vfe_dev->common_data->ms_resource;

	spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
	dual_hw_type =
		vfe_dev->axi_data.src_info[frame_src].dual_hw_type;
	ms_type =
		vfe_dev->axi_data.src_info[frame_src].
		dual_hw_ms_info.dual_hw_ms_type;

	src_info = &vfe_dev->axi_data.src_info[frame_src];
	if (dual_hw_type == DUAL_HW_MASTER_SLAVE) {
		msm_isp_sync_dual_cam_frame_id(vfe_dev, ms_res, frame_src, ts);
		if (src_info->dual_hw_ms_info.sync_state ==
			MSM_ISP_DUAL_CAM_SYNC) {
			/*
			 * for dual hw check that we recv sof from all
			 * linked intf
			 */
			if (ms_res->src_sof_mask & (1 <<
				src_info->dual_hw_ms_info.index)) {
				pr_err_ratelimited("Frame out of sync on vfe %d\n",
					vfe_dev->pdev->id);
				/* Notify to do reconfig at SW sync drop */
				vfe_dev->isp_page->dual_cam_drop_detected = 1;
				/*
				 * set this isp as async mode to force
				 * it sync again at the next sof
				 */
				src_info->dual_hw_ms_info.sync_state =
					MSM_ISP_DUAL_CAM_ASYNC;
				/*
				 * set the other isp as async mode to force
				 * it sync again at the next sof
				 */
				for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
					if (ms_res->src_info[i] == NULL)
						continue;
					if (src_info == ms_res->src_info[i] ||
						ms_res->src_info[i]->
							active == 0)
						continue;
					ms_res->src_info[i]->dual_hw_ms_info.
						sync_state =
						MSM_ISP_DUAL_CAM_ASYNC;
				}
			}
			ms_res->src_sof_mask |= (1 <<
				src_info->dual_hw_ms_info.index);
			if (ms_res->active_src_mask == ms_res->src_sof_mask)
				ms_res->src_sof_mask = 0;
		}
		sof_info = &vfe_dev->axi_data.src_info[frame_src].
			dual_hw_ms_info.sof_info;
		sof_info->frame_id = vfe_dev->axi_data.src_info[frame_src].
			frame_id;
		sof_info->timestamp_ms = ts->event_time.tv_sec * 1000 +
			ts->event_time.tv_usec / 1000;
		sof_info->mono_timestamp_ms = ts->buf_time.tv_sec * 1000 +
			ts->buf_time.tv_usec / 1000;
		spin_unlock_irqrestore(&vfe_dev->common_data->
			common_dev_data_lock, flags);
	} else {
		spin_unlock_irqrestore(&vfe_dev->common_data->
			common_dev_data_lock, flags);
		if (frame_src == VFE_PIX_0) {
			vfe_dev->axi_data.src_info[frame_src].frame_id +=
				vfe_dev->axi_data.src_info[frame_src].
				sof_counter_step;
			ISP_DBG("%s: vfe %d sof_step %d\n", __func__,
				vfe_dev->pdev->id,
				vfe_dev->axi_data.src_info[frame_src].
				sof_counter_step);
		} else {
			vfe_dev->axi_data.src_info[frame_src].frame_id++;
		}
	}

	if (frame_src == VFE_PIX_0) {
		if (vfe_dev->isp_page == NULL)
			pr_err("Invalid ISP PAGE\n");
		else
			vfe_dev->isp_page->kernel_sofid =
				vfe_dev->axi_data.src_info[frame_src].frame_id;

		if (!src_info->frame_id &&
			!src_info->reg_update_frame_id &&
			((src_info->frame_id -
			src_info->reg_update_frame_id) >
			(MAX_REG_UPDATE_THRESHOLD *
			src_info->sof_counter_step))) {
			pr_err("%s:%d reg_update not received for %d frames\n",
				__func__, __LINE__,
				src_info->frame_id -
				src_info->reg_update_frame_id);

			msm_isp_halt_send_error(vfe_dev,
				ISP_EVENT_REG_UPDATE_MISSING);
		}
	}
}

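/**
 * msm_isp_update_pd_stats_idx() - Cache the PD stats buffer index
 * @vfe_dev: The vfe device on which the SOF was received
 * @frame_src: The RAW input source carrying the PDAF data
 *
 * For an active RDI stream of type MSM_CAMERA_RDI_PDAF the buffer
 * currently being written (from the ping pong status) is looked up and
 * its index stored in the common data, so it can be sent along with the
 * BF stats done event.
 *
 * Returns void.
 */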
static void msm_isp_update_pd_stats_idx(struct vfe_device *vfe_dev,
	enum msm_vfe_input_src frame_src)
{
	struct msm_vfe_axi_stream *pd_stream_info = NULL;
	uint32_t pingpong_status = 0, pingpong_bit = 0;
	struct msm_isp_buffer *done_buf = NULL;
	int vfe_idx = -1;
	unsigned long flags;

	if (frame_src < VFE_RAW_0 || frame_src > VFE_RAW_2)
		return;

	pd_stream_info = msm_isp_get_stream_common_data(vfe_dev,
		RDI_INTF_0 + frame_src - VFE_RAW_0);

	if (pd_stream_info && (pd_stream_info->state == ACTIVE) &&
		(pd_stream_info->rdi_input_type ==
			MSM_CAMERA_RDI_PDAF)) {
		vfe_idx = msm_isp_get_vfe_idx_for_stream(
			vfe_dev, pd_stream_info);
		pingpong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
			get_pingpong_status(vfe_dev);
		pingpong_bit = ((pingpong_status >>
			pd_stream_info->wm[vfe_idx][0]) & 0x1);
		done_buf = pd_stream_info->buf[pingpong_bit];
		spin_lock_irqsave(&vfe_dev->common_data->
			common_dev_data_lock, flags);
		if (done_buf)
			vfe_dev->common_data->pd_buf_idx = done_buf->buf_idx;
		else
			vfe_dev->common_data->pd_buf_idx = 0xF;
		spin_unlock_irqrestore(&vfe_dev->common_data->
			common_dev_data_lock, flags);
	}
}

void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
	enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
{
	struct msm_isp_event_data event_data;
	struct msm_vfe_sof_info *sof_info = NULL, *self_sof = NULL;
	enum msm_vfe_dual_hw_ms_type ms_type;
	unsigned long flags;

	memset(&event_data, 0, sizeof(event_data));

	switch (event_type) {
	case ISP_EVENT_SOF:
		if (frame_src == VFE_PIX_0) {
			if (vfe_dev->isp_sof_debug < ISP_SOF_DEBUG_COUNT)
				pr_err("%s: PIX0 frame id: %u\n", __func__,
				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
			vfe_dev->isp_sof_debug++;
		} else if (frame_src == VFE_RAW_0) {
			if (vfe_dev->isp_raw0_debug < ISP_SOF_DEBUG_COUNT)
				pr_err("%s: RAW_0 frame id: %u\n", __func__,
				vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id);
			vfe_dev->isp_raw0_debug++;
		} else if (frame_src == VFE_RAW_1) {
			if (vfe_dev->isp_raw1_debug < ISP_SOF_DEBUG_COUNT)
				pr_err("%s: RAW_1 frame id: %u\n", __func__,
				vfe_dev->axi_data.src_info[VFE_RAW_1].frame_id);
			vfe_dev->isp_raw1_debug++;
		} else if (frame_src == VFE_RAW_2) {
			if (vfe_dev->isp_raw2_debug < ISP_SOF_DEBUG_COUNT)
				pr_err("%s: RAW_2 frame id: %u\n", __func__,
				vfe_dev->axi_data.src_info[VFE_RAW_2].frame_id);
			vfe_dev->isp_raw2_debug++;
		}

		ISP_DBG("%s: vfe %d frame_src %d frameid %d\n", __func__,
			vfe_dev->pdev->id, frame_src,
			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
		trace_msm_cam_isp_status_dump("SOFNOTIFY:", vfe_dev->pdev->id,
			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
			0, 0);

		/*
		 * Dual cam sync and framedrop cannot be supported at the
		 * same time in union. If framedrop also needs to be
		 * supported, move the delta calculation to userspace.
		 */
		spin_lock_irqsave(
			&vfe_dev->common_data->common_dev_data_lock,
			flags);
		if (vfe_dev->common_data->ms_resource.dual_sync_mode ==
			MSM_ISP_DUAL_CAM_SYNC &&
			vfe_dev->axi_data.src_info[frame_src].dual_hw_type ==
			DUAL_HW_MASTER_SLAVE) {
			struct master_slave_resource_info *ms_res =
				&vfe_dev->common_data->ms_resource;
			self_sof = &vfe_dev->axi_data.src_info[frame_src].
				dual_hw_ms_info.sof_info;
			ms_type = vfe_dev->axi_data.src_info[frame_src].
				dual_hw_ms_info.dual_hw_ms_type;
			/* only send back time delta for primary intf */
			if (ms_res->primary_slv_idx > 0 &&
				ms_type == MS_TYPE_MASTER)
				sof_info = &ms_res->src_info[
					ms_res->primary_slv_idx]->
					dual_hw_ms_info.sof_info;
			if (ms_type != MS_TYPE_MASTER &&
				ms_res->master_index > 0)
				sof_info = &ms_res->src_info[
					ms_res->master_index]->
					dual_hw_ms_info.sof_info;
			if (sof_info) {
				event_data.u.sof_info.ms_delta_info.
					delta[0] =
					self_sof->mono_timestamp_ms -
					sof_info->mono_timestamp_ms;
				event_data.u.sof_info.ms_delta_info.
					num_delta_info = 1;
			}
		}
		spin_unlock_irqrestore(&vfe_dev->common_data->
			common_dev_data_lock, flags);
		if (frame_src == VFE_PIX_0)
			msm_isp_check_for_output_error(vfe_dev, ts,
				&event_data.u.sof_info);
		/*
		 * Get and store the buf idx for PD stats;
		 * this is to send the PD stats buffer address
		 * in BF stats done.
		 */
		msm_isp_update_pd_stats_idx(vfe_dev, frame_src);
		break;

	default:
		break;
	}

	event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
	event_data.timestamp = ts->event_time;
	event_data.mono_timestamp = ts->buf_time;
	msm_isp_send_event(vfe_dev, event_type | frame_src, &event_data);
}

/**
 * msm_isp_calculate_framedrop() - Setup frame period and pattern
 * @vfe_dev: vfe device.
 * @stream_cfg_cmd: User space input parameter for period/pattern.
 *
 * Initialize the h/w stream framedrop period and pattern sent
 * by user space.
 *
 * Returns 0 on success else error code.
 */
static int msm_isp_calculate_framedrop(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
	uint32_t framedrop_period = 0;
	struct msm_vfe_axi_stream *stream_info = NULL;

	if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
		< VFE_AXI_SRC_MAX) {
		stream_info = msm_isp_get_stream_common_data(vfe_dev,
			HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
	} else {
		pr_err("%s: Invalid stream handle\n", __func__);
		return -EINVAL;
	}
	if (!stream_info) {
		pr_err("%s: Stream info is NULL\n", __func__);
		return -EINVAL;
	}

	framedrop_period = msm_isp_get_framedrop_period(
		stream_cfg_cmd->frame_skip_pattern);
	stream_info->frame_skip_pattern =
		stream_cfg_cmd->frame_skip_pattern;
	if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
		stream_info->current_framedrop_period =
			MSM_VFE_STREAM_STOP_PERIOD;
	else
		stream_info->current_framedrop_period = framedrop_period;

	stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;

	if (stream_cfg_cmd->burst_count > 0) {
		stream_info->stream_type = BURST_STREAM;
		stream_info->num_burst_capture =
			stream_cfg_cmd->burst_count;
	} else {
		stream_info->stream_type = CONTINUOUS_STREAM;
	}
	return 0;
}

static void msm_isp_calculate_bandwidth(
	struct msm_vfe_axi_stream *stream_info)
{
	int bpp = 0;
	struct vfe_device *vfe_dev;
	struct msm_vfe_axi_shared_data *axi_data;
	int i;

	for (i = 0; i < stream_info->num_isp; i++) {
		vfe_dev = stream_info->vfe_dev[i];
		axi_data = &vfe_dev->axi_data;
		if (stream_info->stream_src < RDI_INTF_0) {
			stream_info->bandwidth[i] =
				(vfe_dev->vfe_clk_info[
				vfe_dev->hw_info->vfe_clk_idx].clk_rate /
				axi_data->src_info[VFE_PIX_0].width) *
				stream_info->max_width[i];
			stream_info->bandwidth[i] =
				(unsigned long)stream_info->bandwidth[i] *
				stream_info->format_factor / ISP_Q2;
		} else {
			int rdi = SRC_TO_INTF(stream_info->stream_src);

			bpp = msm_isp_get_bit_per_pixel(
				stream_info->output_format);
			if (rdi < VFE_SRC_MAX) {
				stream_info->bandwidth[i] =
					(vfe_dev->vfe_clk_info[
					vfe_dev->hw_info->vfe_clk_idx].clk_rate /
					8) * bpp;
			} else {
				pr_err("%s: Invalid rdi interface\n", __func__);
			}
		}
	}
}

#ifdef CONFIG_MSM_AVTIMER
/**
 * msm_isp_set_avtimer_fptr() - Set avtimer function pointer
 * @avtimer: struct of type avtimer_fptr_t to hold function pointer.
 *
 * Initialize the function pointers sent by the avtimer driver
 *
 */
void msm_isp_set_avtimer_fptr(struct avtimer_fptr_t avtimer)
{
	avtimer_func.fptr_avtimer_open = avtimer.fptr_avtimer_open;
	avtimer_func.fptr_avtimer_enable = avtimer.fptr_avtimer_enable;
	avtimer_func.fptr_avtimer_get_time = avtimer.fptr_avtimer_get_time;
}
EXPORT_SYMBOL(msm_isp_set_avtimer_fptr);

void msm_isp_start_avtimer(void)
{
	if (avtimer_func.fptr_avtimer_open &&
		avtimer_func.fptr_avtimer_enable) {
		avtimer_func.fptr_avtimer_open();
		avtimer_func.fptr_avtimer_enable(1);
	}
}
void msm_isp_stop_avtimer(void)
{
	if (avtimer_func.fptr_avtimer_enable) {
		avtimer_func.fptr_avtimer_enable(0);
	}
}

void msm_isp_get_avtimer_ts(
	struct msm_isp_timestamp *time_stamp)
{
	int rc = 0;
	uint32_t avtimer_usec = 0;
	uint64_t avtimer_tick = 0;

	if (avtimer_func.fptr_avtimer_get_time) {
		rc = avtimer_func.fptr_avtimer_get_time(&avtimer_tick);
		if (rc < 0) {
			pr_err_ratelimited("%s: Error: Invalid AVTimer Tick, rc=%d\n",
				__func__, rc);
			/* In case of error return zero AVTimer Tick Value */
			time_stamp->vt_time.tv_sec = 0;
			time_stamp->vt_time.tv_usec = 0;
		} else {
			avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
			time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
			time_stamp->vt_time.tv_usec = avtimer_usec;
			pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
				(uint32_t)(avtimer_tick), avtimer_usec);
		}
	}
}
#else
void msm_isp_start_avtimer(void)
{
	pr_err("AV Timer is not supported\n");
}

void msm_isp_get_avtimer_ts(
	struct msm_isp_timestamp *time_stamp)
{
	struct timespec ts;

	pr_debug("%s: AVTimer driver not available using system time\n",
		__func__);

	get_monotonic_boottime(&ts);
	time_stamp->vt_time.tv_sec = ts.tv_sec;
	time_stamp->vt_time.tv_usec = ts.tv_nsec/1000;
}
#endif

int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
	int rc = 0, i = 0;
	uint32_t io_format = 0;
	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
	struct msm_vfe_axi_stream *stream_info;

	if (stream_cfg_cmd->stream_src >= VFE_AXI_SRC_MAX) {
		pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
			stream_cfg_cmd->stream_src);
		return -EINVAL;
	}
	stream_info = msm_isp_get_stream_common_data(vfe_dev,
		stream_cfg_cmd->stream_src);

	rc = msm_isp_axi_create_stream(vfe_dev,
		&vfe_dev->axi_data, stream_cfg_cmd, stream_info);
	if (rc) {
		pr_err("%s: create stream failed\n", __func__);
		return rc;
	}

	rc = msm_isp_validate_axi_request(
		vfe_dev, stream_info, stream_cfg_cmd);
	if (rc) {
		msm_isp_axi_destroy_stream(vfe_dev, stream_info);
		pr_err("%s: Request validation failed\n", __func__);
		return rc;
	}

	stream_info->rdi_input_type = stream_cfg_cmd->rdi_input_type;
	vfe_dev->reg_update_requested &=
		~(BIT(SRC_TO_INTF(stream_info->stream_src)));

	msm_isp_axi_reserve_wm(vfe_dev, stream_info);

	if (stream_info->stream_src < RDI_INTF_0) {
		io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
		if (stream_info->stream_src == CAMIF_RAW ||
			stream_info->stream_src == IDEAL_RAW) {
			if (stream_info->stream_src == CAMIF_RAW &&
				io_format != stream_info->output_format)
				pr_debug("%s: Overriding input format\n",
					__func__);

			io_format = stream_info->output_format;
		}
		rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
			vfe_dev, stream_info->stream_src, io_format);
		if (rc) {
			pr_err("%s: cfg io format failed\n", __func__);
			goto done;
		}
	}

	if (!stream_info->controllable_output) {
		/*
		 * Check that the parameters passed from the second vfe are
		 * the same as the first vfe. Do this only for
		 * non-controllable streams right now, because the user
		 * driver has a bug where it sends mismatched info for
		 * controllable streams.
		 */
		if (stream_info->num_isp > 1) {
			if (stream_cfg_cmd->init_frame_drop !=
				stream_info->init_frame_drop) {
				pr_err("%s: stream %d init drop mismatch %d/%d\n",
					__func__, stream_info->stream_id,
					stream_info->init_frame_drop,
					stream_cfg_cmd->init_frame_drop);
				rc = -EINVAL;
			}
			if (stream_cfg_cmd->frame_skip_pattern !=
				stream_info->frame_skip_pattern) {
				pr_err("%s: stream %d skip pattern mismatch %d/%d\n",
					__func__, stream_info->stream_id,
					stream_info->frame_skip_pattern,
					stream_cfg_cmd->frame_skip_pattern);
				rc = -EINVAL;
			}
			if (stream_info->stream_type == CONTINUOUS_STREAM &&
				stream_cfg_cmd->burst_count > 0) {
				pr_err("%s: stream %d stream type mismatch\n",
					__func__, stream_info->stream_id);
				rc = -EINVAL;
			}
			if (stream_info->stream_type == BURST_STREAM &&
				stream_info->num_burst_capture !=
				stream_cfg_cmd->burst_count) {
				pr_err("%s: stream %d stream burst count mismatch %d/%d\n",
					__func__, stream_info->stream_id,
					stream_info->num_burst_capture,
					stream_cfg_cmd->burst_count);
				rc = -EINVAL;
			}
		} else {
			rc = msm_isp_calculate_framedrop(vfe_dev,
				stream_cfg_cmd);
		}
		if (rc)
			goto done;
	} else {
		stream_info->stream_type = BURST_STREAM;
		stream_info->num_burst_capture = 0;
		stream_info->frame_skip_pattern = NO_SKIP;
		stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
		stream_info->current_framedrop_period =
			MSM_VFE_STREAM_STOP_PERIOD;
	}
	if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
		vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
		msm_isp_start_avtimer();
	}

	if (stream_info->num_planes > 1)
		msm_isp_axi_reserve_comp_mask(vfe_dev, stream_info);

	for (i = 0; i < stream_info->num_planes; i++) {
		vfe_dev->hw_info->vfe_ops.axi_ops.
			cfg_wm_reg(vfe_dev, stream_info, i);

		vfe_dev->hw_info->vfe_ops.axi_ops.
			cfg_wm_xbar_reg(vfe_dev, stream_info, i);
	}
	if (stream_info->state == INACTIVE) {
		/* initialize the WM ping pong with scratch buffer */
		msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
		msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
	}
done:
	if (rc) {
		msm_isp_axi_free_wm(vfe_dev, stream_info);
		msm_isp_axi_destroy_stream(vfe_dev, stream_info);
	}
	return rc;
}

int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
	int rc = 0, i = 0;
	struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
	struct msm_vfe_axi_stream *stream_info;
	struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
	int vfe_idx;

	if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
		VFE_AXI_SRC_MAX) {
		pr_err("%s: Invalid stream handle\n", __func__);
		return -EINVAL;
	}
	stream_info = msm_isp_get_stream_common_data(vfe_dev,
		HANDLE_TO_IDX(stream_release_cmd->stream_handle));

	vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info);
	if (vfe_idx == -ENOTTY ||
		stream_release_cmd->stream_handle !=
		stream_info->stream_handle[vfe_idx]) {
		pr_err("%s: Invalid stream %pK handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n",
			__func__, stream_info,
			stream_release_cmd->stream_handle,
			vfe_idx != -ENOTTY ?
			stream_info->stream_handle[vfe_idx] : 0, vfe_idx,
			vfe_dev->pdev->id, stream_info->num_isp);
		return -EINVAL;
	}

	if (stream_info->state != INACTIVE && stream_info->state != AVAILABLE) {
		stream_cfg.cmd = STOP_STREAM;
		stream_cfg.num_streams = 1;
		stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
		msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
	}

	for (i = 0; i < stream_info->num_planes; i++) {
		vfe_dev->hw_info->vfe_ops.axi_ops.
			clear_wm_reg(vfe_dev, stream_info, i);

		vfe_dev->hw_info->vfe_ops.axi_ops.
			clear_wm_xbar_reg(vfe_dev, stream_info, i);
	}

	if (stream_info->num_planes > 1)
		msm_isp_axi_free_comp_mask(vfe_dev, stream_info);

	vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
	msm_isp_axi_free_wm(vfe_dev, stream_info);

	msm_isp_axi_destroy_stream(vfe_dev, stream_info);

	return rc;
}

void msm_isp_release_all_axi_stream(struct vfe_device *vfe_dev)
{
	struct msm_vfe_axi_stream_release_cmd
		stream_release_cmd[VFE_AXI_SRC_MAX];
	struct msm_vfe_axi_stream_cfg_cmd stream_cfg_cmd;
	struct msm_vfe_axi_stream *stream_info;
	int i;
	int vfe_idx;
	int num_stream = 0;
	unsigned long flags;

	stream_cfg_cmd.cmd = STOP_STREAM;
	stream_cfg_cmd.num_streams = 0;

	for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
		stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
		spin_lock_irqsave(&stream_info->lock, flags);
		vfe_idx = msm_isp_get_vfe_idx_for_stream_user(
			vfe_dev, stream_info);
		if (-ENOTTY == vfe_idx) {
			spin_unlock_irqrestore(&stream_info->lock, flags);
			continue;
		}
		stream_release_cmd[num_stream++].stream_handle =
			stream_info->stream_handle[vfe_idx];
		if (stream_info->state == INACTIVE) {
			spin_unlock_irqrestore(&stream_info->lock, flags);
			continue;
		}
		stream_cfg_cmd.stream_handle[
			stream_cfg_cmd.num_streams] =
			stream_info->stream_handle[vfe_idx];
		stream_cfg_cmd.num_streams++;
		spin_unlock_irqrestore(&stream_info->lock, flags);
	}
	if (stream_cfg_cmd.num_streams)
		msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg_cmd);

	for (i = 0; i < num_stream; i++)
		msm_isp_release_axi_stream(vfe_dev, &stream_release_cmd[i]);
}

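/**
 * msm_isp_axi_stream_enable_cfg() - Enable or disable the stream write masters
 * @stream_info: The stream whose write masters are configured
 *
 * Write masters are enabled for START/RESUME pending states and disabled
 * for STOP/PAUSE pending states. For the raw snapshot case a reg update
 * is issued on disable since no reg update ack is available, and the
 * active stream count of each vfe is adjusted.
 *
 * Returns void.
 */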
static void msm_isp_axi_stream_enable_cfg(
	struct msm_vfe_axi_stream *stream_info)
{
	int enable_wm = 0;
	struct vfe_device *vfe_dev;
	struct msm_vfe_axi_shared_data *axi_data;
	uint32_t stream_idx = stream_info->stream_src;
	int k;
	int i;

	WARN_ON(stream_idx >= VFE_AXI_SRC_MAX);

	WARN_ON(stream_info->state != START_PENDING &&
		stream_info->state != RESUME_PENDING &&
		stream_info->state != STOP_PENDING &&
		stream_info->state != PAUSE_PENDING);

	if (stream_info->state == START_PENDING ||
		stream_info->state == RESUME_PENDING) {
		enable_wm = 1;
	} else {
		enable_wm = 0;
	}

	for (k = 0; k < stream_info->num_isp; k++) {
		vfe_dev = stream_info->vfe_dev[k];
		axi_data = &vfe_dev->axi_data;
		for (i = 0; i < stream_info->num_planes; i++) {
			vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
				vfe_dev->vfe_base,
				stream_info->wm[k][i], enable_wm);
			if (enable_wm)
				continue;
			/*
			 * Issue a reg update for Raw Snapshot Case
			 * since we don't have reg update ack
			 */
			if (vfe_dev->axi_data.src_info[VFE_PIX_0].
				raw_stream_count > 0
				&& vfe_dev->axi_data.src_info[VFE_PIX_0].
				stream_count == 0) {
				if (stream_info->stream_src == CAMIF_RAW ||
					stream_info->stream_src == IDEAL_RAW) {
					vfe_dev->hw_info->vfe_ops.core_ops.
						reg_update(vfe_dev,
							VFE_PIX_0);
				}
			}
		}
		if (stream_info->state == START_PENDING)
			axi_data->num_active_stream++;
		else if (stream_info->state == STOP_PENDING)
			axi_data->num_active_stream--;
	}
}

static void msm_isp_free_pending_buffer(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info,
	struct msm_isp_timestamp *ts)
{
	struct timeval *time_stamp;
	struct msm_isp_buffer *done_buf = NULL;
	uint32_t frame_id;
	int rc;

	if (!stream_info->controllable_output ||
		!stream_info->pending_buf_info.is_buf_done_pending) {
		return;
	}

	if (vfe_dev->vt_enable) {
		msm_isp_get_avtimer_ts(ts);
		time_stamp = &ts->vt_time;
	} else {
		time_stamp = &ts->buf_time;
	}

	done_buf = stream_info->pending_buf_info.buf;
	frame_id = stream_info->pending_buf_info.frame_id;
	if (done_buf) {
		rc = msm_isp_process_done_buf(vfe_dev, stream_info,
			done_buf, time_stamp, frame_id);
		if (rc == 0) {
			stream_info->pending_buf_info.buf = NULL;
			stream_info->pending_buf_info.is_buf_done_pending = 0;
		}
	}
}

Pratap Nirujogi6e759912018-01-17 17:51:17 +05301637static void __msm_isp_axi_stream_update(
1638 struct msm_vfe_axi_stream *stream_info,
1639 struct msm_isp_timestamp *ts)
1640{
1641 int j;
1642 int intf = SRC_TO_INTF(stream_info->stream_src);
1643 struct vfe_device *vfe_dev;
1644 int k;
1645
1646 switch (stream_info->state) {
1647 case UPDATING:
1648 stream_info->state = ACTIVE;
1649 complete_all(&stream_info->active_comp);
1650 break;
1651 case STOP_PENDING:
1652 msm_isp_axi_stream_enable_cfg(stream_info);
1653 stream_info->state = STOPPING;
1654 break;
1655 case START_PENDING:
1656 msm_isp_axi_stream_enable_cfg(stream_info);
1657 stream_info->state = STARTING;
1658 break;
1659 case STOPPING:
1660 stream_info->state = INACTIVE;
1661 for (k = 0; k < MSM_ISP_COMP_IRQ_MAX; k++)
1662 stream_info->composite_irq[k] = 0;
1663 complete_all(&stream_info->inactive_comp);
1664 break;
1665 case STARTING:
1666 stream_info->state = ACTIVE;
1667 complete_all(&stream_info->active_comp);
1668 break;
1669 case PAUSING:
1670 stream_info->state = PAUSED;
1671 msm_isp_reload_ping_pong_offset(stream_info);
1672 for (j = 0; j < stream_info->num_planes; j++) {
1673 for (k = 0; k < stream_info->num_isp; k++) {
1674 vfe_dev = stream_info->vfe_dev[k];
1675 vfe_dev->hw_info->vfe_ops.axi_ops.
1676 cfg_wm_reg(vfe_dev, stream_info, j);
1677 }
1678 }
1679 stream_info->state = RESUME_PENDING;
1680 msm_isp_axi_stream_enable_cfg(stream_info);
1681 stream_info->state = RESUMING;
1682 break;
1683 case RESUMING:
1684 stream_info->runtime_output_format = stream_info->output_format;
1685 stream_info->state = ACTIVE;
1686 complete_all(&stream_info->active_comp);
1687 for (j = 0; j < stream_info->num_isp; j++) {
1688 /* notify that all streams have been updated */
1689 msm_isp_notify(stream_info->vfe_dev[j],
1690 ISP_EVENT_STREAM_UPDATE_DONE, intf, ts);
1691 atomic_set(&stream_info->vfe_dev[j]->
1692 axi_data.axi_cfg_update[intf], 0);
1693 }
1694 stream_info->update_vfe_mask = 0;
1695 break;
1696 default:
1697 break;
1698 }
1699}
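
/*
 * Illustrative sketch of the stream state machine driven above (not an
 * exhaustive list of transitions): a stream start typically walks
 *
 *	START_PENDING -> STARTING -> ACTIVE
 *
 * where the *_PENDING step programs the WM enables via
 * msm_isp_axi_stream_enable_cfg() and a later reg-update/SOF pass through
 * this function completes the transition. A stop walks
 *
 *	STOP_PENDING -> STOPPING -> INACTIVE
 *
 * and a WM reconfiguration walks
 *
 *	PAUSING -> PAUSED -> RESUME_PENDING -> RESUMING -> ACTIVE
 *
 * with the ping/pong offsets reloaded while the stream is paused.
 */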
1700
1701void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
1702 enum msm_vfe_input_src frame_src,
1703 struct msm_isp_timestamp *ts)
1704{
1705 int i;
1706 unsigned long flags;
1707 struct msm_vfe_axi_stream *stream_info;
1708
1709 for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
1710 stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
1711 if (SRC_TO_INTF(stream_info->stream_src) !=
1712 frame_src) {
1713 ISP_DBG("%s stream_src %d frame_src %d\n", __func__,
1714 SRC_TO_INTF(
1715 stream_info->stream_src),
1716 frame_src);
1717 continue;
1718 }
1719 if (stream_info->state == AVAILABLE)
1720 continue;
1721 spin_lock_irqsave(&stream_info->lock, flags);
1722 __msm_isp_axi_stream_update(stream_info, ts);
1723 spin_unlock_irqrestore(&stream_info->lock, flags);
1724 }
1725}
1726
1727static void msm_isp_reload_ping_pong_offset(
1728 struct msm_vfe_axi_stream *stream_info)
1729{
1730 int i, j;
1731 uint32_t bit;
1732 struct msm_isp_buffer *buf;
1733 int32_t buf_size_byte = 0;
1734 int32_t word_per_line = 0;
1735 int k;
1736 struct vfe_device *vfe_dev;
1737
1738 for (k = 0; k < stream_info->num_isp; k++) {
1739 vfe_dev = stream_info->vfe_dev[k];
1740 for (i = 0; i < 2; i++) {
1741 buf = stream_info->buf[i];
1742 if (!buf)
1743 continue;
1744
1745 bit = i ? 0 : 1;
1746
1747 for (j = 0; j < stream_info->num_planes; j++) {
1748 word_per_line = msm_isp_cal_word_per_line(
1749 stream_info->output_format, stream_info->
1750 plane_cfg[k][j].output_stride);
1751 if (word_per_line < 0) {
1752				/* 0 means no prefetch */
1753 word_per_line = 0;
1754 buf_size_byte = 0;
1755 } else {
1756 buf_size_byte = (word_per_line * 8 *
1757 stream_info->plane_cfg[k][j].
1758 output_scan_lines) - stream_info->
1759 plane_cfg[k][j].plane_addr_offset;
1760 }
1761
1762 vfe_dev->hw_info->vfe_ops.axi_ops.
1763 update_ping_pong_addr(
1764 vfe_dev->vfe_base,
1765 stream_info->wm[k][j],
1766 bit,
1767 buf->mapped_info[j].paddr +
1768 stream_info->plane_cfg[k][j].
1769 plane_addr_offset,
1770 buf_size_byte);
1771 }
1772 }
1773 }
1774}
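
/*
 * Worked example of the prefetch size programmed above, with assumed
 * values: if msm_isp_cal_word_per_line() returns 160 words for the
 * configured output_stride, output_scan_lines is 720 and
 * plane_addr_offset is 0, then
 *
 *	buf_size_byte = (160 * 8 * 720) - 0 = 921600 bytes
 *
 * i.e. roughly one full 1280x720 plane at 8 bits per pixel. A negative
 * word_per_line disables the prefetch by programming a zero length.
 */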
1775
1776static int msm_isp_update_deliver_count(struct vfe_device *vfe_dev,
1777 struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_bit,
1778 struct msm_isp_buffer *done_buf)
1779{
1780 int rc = 0;
1781
1782 if (!stream_info->controllable_output)
1783 goto done;
1784
1785 if (!stream_info->undelivered_request_cnt ||
1786 (done_buf == NULL)) {
1787		pr_err_ratelimited("%s:%d error: undelivered_request_cnt is 0 or done_buf is NULL\n",
1788 __func__, __LINE__);
1789 rc = -EINVAL;
1790 goto done;
1791 } else {
1792 if ((done_buf->is_drop_reconfig == 1) &&
1793 (stream_info->sw_ping_pong_bit == -1)) {
1794 goto done;
1795 }
1796		/* After WM reload, we get a buf done for the ping buffer */
1797 if (stream_info->sw_ping_pong_bit == -1)
1798 stream_info->sw_ping_pong_bit = 0;
1799 if (done_buf->is_drop_reconfig != 1)
1800 stream_info->undelivered_request_cnt--;
1801 if (pingpong_bit != stream_info->sw_ping_pong_bit) {
1802 pr_err("%s:%d ping pong bit actual %d sw %d\n",
1803 __func__, __LINE__, pingpong_bit,
1804 stream_info->sw_ping_pong_bit);
1805 rc = -EINVAL;
1806 goto done;
1807 }
1808 stream_info->sw_ping_pong_bit ^= 1;
1809 }
1810done:
1811 return rc;
1812}
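
/*
 * Minimal sketch of the software ping/pong bookkeeping above, assuming a
 * controllable output stream with two user requests queued:
 *
 *	sw_ping_pong_bit == -1              (set when the WMs are reloaded)
 *	1st buf done on ping (bit 0):       sw_ping_pong_bit -> 0,
 *	                                    undelivered_request_cnt 2 -> 1,
 *	                                    then toggled to 1
 *	2nd buf done on pong (bit 1):       matches sw_ping_pong_bit, so
 *	                                    undelivered_request_cnt 1 -> 0
 *
 * Any mismatch between the hardware pingpong_bit and the software copy is
 * reported as -EINVAL so the caller can start recovery.
 */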
1813
1814void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
1815{
1816 struct msm_isp_event_data error_event;
1817 struct msm_vfe_axi_halt_cmd halt_cmd;
1818 struct vfe_device *temp_dev = NULL;
1819 uint32_t irq_status0 = 0, irq_status1 = 0;
Srikanth Uyyalac2558ad2018-05-04 14:30:53 +05301820 struct vfe_device *vfe_dev_other = NULL;
1821 uint32_t vfe_id_other = 0;
1822 unsigned long flags;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05301823
1824 if (atomic_read(&vfe_dev->error_info.overflow_state) !=
1825 NO_OVERFLOW)
1826		/* Recovery is already in progress */
1827 return;
1828
Srikanth Uyyalabf0d5ad2018-05-04 14:23:10 +05301829 /* if there are no active streams - do not start recovery */
Srikanth Uyyalac2558ad2018-05-04 14:30:53 +05301830 if (vfe_dev->is_split) {
1831 if (vfe_dev->pdev->id == ISP_VFE0)
1832 vfe_id_other = ISP_VFE1;
1833 else
1834 vfe_id_other = ISP_VFE0;
1835
1836 spin_lock_irqsave(
1837 &vfe_dev->common_data->common_dev_data_lock, flags);
1838 vfe_dev_other = vfe_dev->common_data->dual_vfe_res->
1839 vfe_dev[vfe_id_other];
1840 if (!vfe_dev->axi_data.num_active_stream ||
1841 !vfe_dev_other->axi_data.num_active_stream) {
1842 spin_unlock_irqrestore(
1843 &vfe_dev->common_data->common_dev_data_lock,
1844 flags);
1845 pr_err("%s:skip the recovery as no active streams\n",
1846 __func__);
1847 return;
1848 }
1849 spin_unlock_irqrestore(
1850 &vfe_dev->common_data->common_dev_data_lock, flags);
1851 } else if (!vfe_dev->axi_data.num_active_stream)
Srikanth Uyyalabf0d5ad2018-05-04 14:23:10 +05301852 return;
1853
Pratap Nirujogi6e759912018-01-17 17:51:17 +05301854 if (event == ISP_EVENT_PING_PONG_MISMATCH &&
1855 vfe_dev->axi_data.recovery_count < MAX_RECOVERY_THRESHOLD) {
1856 pr_err("%s: ping pong mismatch on vfe%d recovery count %d\n",
1857 __func__, vfe_dev->pdev->id,
1858 vfe_dev->axi_data.recovery_count);
1859 msm_isp_process_overflow_irq(vfe_dev,
1860 &irq_status0, &irq_status1, 1);
1861 vfe_dev->axi_data.recovery_count++;
1862 return;
1863 }
1864 memset(&halt_cmd, 0, sizeof(struct msm_vfe_axi_halt_cmd));
1865 memset(&error_event, 0, sizeof(struct msm_isp_event_data));
1866 halt_cmd.stop_camif = 1;
1867 halt_cmd.overflow_detected = 0;
1868 halt_cmd.blocking_halt = 0;
1869
1870 pr_err("%s: vfe%d fatal error!\n", __func__, vfe_dev->pdev->id);
1871
1872 atomic_set(&vfe_dev->error_info.overflow_state,
1873 HALT_ENFORCED);
1874
1875 vfe_dev->hw_info->vfe_ops.core_ops.set_halt_restart_mask(vfe_dev);
1876 if (vfe_dev->is_split) {
1877 int other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0 ?
1878 ISP_VFE1 : ISP_VFE0);
1879 temp_dev = vfe_dev->common_data->
1880 dual_vfe_res->vfe_dev[other_vfe_id];
1881 atomic_set(&temp_dev->error_info.overflow_state,
1882 HALT_ENFORCED);
1883 temp_dev->hw_info->vfe_ops.core_ops.
1884 set_halt_restart_mask(temp_dev);
1885 }
1886 error_event.frame_id =
1887 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1888
1889 msm_isp_send_event(vfe_dev, event, &error_event);
1890}
1891
1892int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
1893 unsigned long fault_addr)
1894{
1895 int i, j;
1896 struct msm_isp_buffer *buf = NULL;
1897 uint32_t pingpong_bit;
1898 struct msm_vfe_axi_stream *stream_info = NULL;
1899 int k;
1900
1901 for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
1902 stream_info = msm_isp_get_stream_common_data(vfe_dev, j);
1903 if (stream_info->state == INACTIVE ||
1904 stream_info->state == AVAILABLE)
1905 continue;
1906
1907 for (pingpong_bit = 0; pingpong_bit < 2; pingpong_bit++) {
1908 dma_addr_t temp;
1909
1910 buf = stream_info->buf[pingpong_bit];
1911 if (buf == NULL) {
1912 pr_err("%s: buf NULL for stream %x num_isp %d\n",
1913 __func__,
1914 stream_info->stream_src,
1915 stream_info->num_isp);
1916 continue;
1917 }
1918 temp = buf->mapped_info[0].paddr +
1919 buf->mapped_info[0].len;
1920 pr_err("%s: stream %x ping bit %d uses buffer %pK-%pK, num_isp %d\n",
1921 __func__, stream_info->stream_src,
1922 pingpong_bit,
1923 &buf->mapped_info[0].paddr, &temp,
1924 stream_info->num_isp);
1925
1926 for (i = 0; i < stream_info->num_planes; i++) {
1927 for (k = 0; k < stream_info->num_isp; k++) {
1928 pr_debug(
1929 "%s: stream_id %x ping-pong %d plane %d start_addr %pK addr_offset %x len %zx stride %d scanline %d\n"
1930 , __func__, stream_info->stream_id,
1931 pingpong_bit, i,
1932 (void *)buf->mapped_info[i].paddr,
1933 stream_info->
1934 plane_cfg[k][i].plane_addr_offset,
1935 buf->mapped_info[i].len,
1936 stream_info->
1937 plane_cfg[k][i].output_stride,
1938 stream_info->
1939 plane_cfg[k][i].output_scan_lines
1940 );
1941 }
1942 }
1943 }
1944 }
1945
1946 return 0;
1947}
1948
1949static struct msm_isp_buffer *msm_isp_get_stream_buffer(
1950 struct vfe_device *vfe_dev,
1951 struct msm_vfe_axi_stream *stream_info)
1952{
1953 int rc = 0;
1954 uint32_t bufq_handle = 0;
1955 struct msm_isp_buffer *buf = NULL;
1956 struct msm_vfe_frame_request_queue *queue_req;
1957 uint32_t buf_index = MSM_ISP_INVALID_BUF_INDEX;
1958
1959 if (!stream_info->controllable_output) {
1960 bufq_handle = stream_info->bufq_handle
1961 [VFE_BUF_QUEUE_DEFAULT];
1962 } else {
1963 queue_req = list_first_entry_or_null(
1964 &stream_info->request_q,
1965 struct msm_vfe_frame_request_queue, list);
1966 if (!queue_req)
1967 return buf;
1968
1969 bufq_handle = stream_info->
1970 bufq_handle[queue_req->buff_queue_id];
1971
1972 if (!bufq_handle ||
1973 stream_info->request_q_cnt <= 0) {
1974 pr_err_ratelimited("%s: Drop request. Shared stream is stopped.\n",
1975 __func__);
1976 return buf;
1977 }
1978 buf_index = queue_req->buf_index;
1979 queue_req->cmd_used = 0;
1980 list_del(&queue_req->list);
1981 stream_info->request_q_cnt--;
1982 }
1983 rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
1984 vfe_dev->pdev->id, bufq_handle, buf_index, &buf);
1985
1986 if (rc == -EFAULT) {
1987 msm_isp_halt_send_error(vfe_dev,
1988 ISP_EVENT_BUF_FATAL_ERROR);
1989 return buf;
1990 }
1991 if (rc < 0)
1992 return buf;
1993
1994 if (buf->num_planes != stream_info->num_planes) {
1995 pr_err("%s: Invalid buffer\n", __func__);
1996 vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
1997 bufq_handle, buf->buf_idx);
1998 buf = NULL;
1999 }
2000 return buf;
2001}
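
/*
 * Illustrative note (behaviour summarised from the helper above): a
 * non-controllable stream always pulls its next buffer from
 * VFE_BUF_QUEUE_DEFAULT, while a controllable output stream only gets a
 * buffer when a user frame request is pending, roughly:
 *
 *	queue_req = list_first_entry_or_null(&stream_info->request_q, ...);
 *	bufq_handle = stream_info->bufq_handle[queue_req->buff_queue_id];
 *
 * A -EFAULT from get_buf is escalated to ISP_EVENT_BUF_FATAL_ERROR, and a
 * buffer whose plane count does not match the stream is put back and
 * treated as "no buffer".
 */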
2002
2003int msm_isp_cfg_offline_ping_pong_address(struct vfe_device *vfe_dev,
2004 struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
2005 uint32_t buf_idx)
2006{
2007 int i, rc = 0;
2008 struct msm_isp_buffer *buf = NULL;
2009 uint32_t pingpong_bit;
2010 uint32_t buffer_size_byte = 0;
2011 int32_t word_per_line = 0;
2012 dma_addr_t paddr;
2013 uint32_t bufq_handle = 0;
2014 int vfe_idx;
2015
2016 bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT];
2017
2018 if (!vfe_dev->is_split) {
2019 rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
2020 vfe_dev->buf_mgr, bufq_handle, buf_idx, &buf);
2021 if (rc < 0 || !buf) {
2022 pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
2023 __func__, rc, buf);
2024 return -EINVAL;
2025 }
2026
2027 if (buf->num_planes != stream_info->num_planes) {
2028 pr_err("%s: Invalid buffer\n", __func__);
2029 vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
2030 bufq_handle, buf->buf_idx);
2031 return -EINVAL;
2032 }
2033 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
2034 pingpong_bit = ((pingpong_status >>
2035 stream_info->wm[vfe_idx][0]) & 0x1);
2036
2037 for (i = 0; i < stream_info->num_planes; i++) {
2038 word_per_line = msm_isp_cal_word_per_line(
2039 stream_info->output_format,
2040 stream_info->plane_cfg[vfe_idx][i].
2041 output_stride);
2042 if (word_per_line < 0) {
2043				/* 0 means no prefetch */
2044 word_per_line = 0;
2045 buffer_size_byte = 0;
2046 } else {
2047 buffer_size_byte = (word_per_line * 8 *
2048 stream_info->plane_cfg[vfe_idx][i].
2049 output_scan_lines) -
2050 stream_info->
2051 plane_cfg[vfe_idx][i].plane_addr_offset;
2052 }
2053 paddr = buf->mapped_info[i].paddr;
2054
2055 vfe_dev->hw_info->vfe_ops.axi_ops.
2056 update_ping_pong_addr(
2057 vfe_dev->vfe_base, stream_info->wm[vfe_idx][i],
2058 pingpong_bit, paddr +
2059 stream_info->
2060 plane_cfg[vfe_idx][i].plane_addr_offset,
2061 buffer_size_byte);
2064 }
2065 buf->state = MSM_ISP_BUFFER_STATE_DEQUEUED;
2066 stream_info->buf[!pingpong_bit] = buf;
2067 buf->pingpong_bit = !pingpong_bit;
2068 }
2069 return rc;
2070
2071}
2072
2073static int msm_isp_cfg_ping_pong_address(
2074 struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
2075 struct msm_isp_buffer *buf)
2076{
2077 int i;
2078 int j;
2079 uint32_t pingpong_bit;
2080 struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
2081 uint32_t buffer_size_byte = 0;
2082 int32_t word_per_line = 0;
2083 dma_addr_t paddr;
2084
2085
2086 /* Isolate pingpong_bit from pingpong_status */
2087 pingpong_bit = ((pingpong_status >>
2088 stream_info->wm[0][0]) & 0x1);
2089
2090 /* return if buffer already present */
2091 if (stream_info->buf[!pingpong_bit]) {
2092 pr_err("stream %x buffer already set for pingpong %d\n",
2093 stream_info->stream_src, !pingpong_bit);
2094 return 1;
2095 }
2096
2097 if (buf == NULL)
2098 buf = msm_isp_get_stream_buffer(vfe_dev, stream_info);
2099
2100 if (!buf) {
2101 msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
Ramesh Vc97c71e2019-04-17 10:50:13 +05302102 if (stream_info->controllable_output)
2103 return 1;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302104 return 0;
2105 }
2106
2107 for (i = 0; i < stream_info->num_planes; i++) {
2108 paddr = buf->mapped_info[i].paddr;
2109 ISP_DBG(
2110 "%s: vfe %d config buf %d to pingpong %d stream %x\n",
2111 __func__, vfe_dev->pdev->id,
2112 buf->buf_idx, !pingpong_bit,
2113 stream_info->stream_id);
2114 for (j = 0; j < stream_info->num_isp; j++) {
2115 vfe_dev = stream_info->vfe_dev[j];
2116 word_per_line =
2117 msm_isp_cal_word_per_line(
2118 stream_info->output_format,
2119 stream_info->plane_cfg[j][i].output_stride);
2120 if (word_per_line < 0) {
2121				/* 0 means no prefetch */
2122 word_per_line = 0;
2123 buffer_size_byte = 0;
2124 } else {
2125 buffer_size_byte =
2126 (word_per_line * 8 *
2127 stream_info->plane_cfg[j][i].
2128 output_scan_lines) -
2129 stream_info->plane_cfg[j][i].
2130 plane_addr_offset;
2131 }
2132 vfe_dev->hw_info->vfe_ops.axi_ops.
2133 update_ping_pong_addr(
2134 vfe_dev->vfe_base,
2135 stream_info->wm[j][i],
2136 pingpong_bit, paddr +
2137 stream_info->plane_cfg[j][i].
2138 plane_addr_offset,
2139 buffer_size_byte);
2140 }
2141 }
2142 stream_info->buf[!pingpong_bit] = buf;
2143 buf->pingpong_bit = !pingpong_bit;
2144 return 0;
2145}
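
/*
 * Minimal sketch of the ping/pong selection used above (assumed WM number
 * for illustration): pingpong_status carries one bit per write master, so
 * for a stream whose first WM index is 5,
 *
 *	pingpong_bit = (pingpong_status >> 5) & 0x1;
 *
 * and the new buffer is programmed on the opposite half,
 * stream_info->buf[!pingpong_bit]; a scratch address is programmed
 * instead when no buffer is available.
 */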
2146
2147static void msm_isp_handle_done_buf_frame_id_mismatch(
2148 struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info,
2149 struct msm_isp_buffer *buf, struct timeval *time_stamp,
2150 uint32_t frame_id)
2151{
2152 struct msm_isp_event_data error_event;
2153 int ret = 0;
2154
2155 memset(&error_event, 0, sizeof(error_event));
2156 error_event.frame_id =
2157 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
2158 error_event.u.error_info.err_type =
2159 ISP_ERROR_FRAME_ID_MISMATCH;
2160 if (stream_info->buf_divert)
2161 vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
2162 buf->bufq_handle, buf->buf_idx);
2163 else
2164 ret = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
2165 buf->bufq_handle, buf->buf_idx, time_stamp,
2166 frame_id,
2167 stream_info->runtime_output_format);
2168 if (ret == -EFAULT) {
2169 msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
2170 return;
2171 }
2172 msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR,
2173 &error_event);
2174	pr_err("%s: Error! frame id mismatch!! 1st buf frame %d, curr frame %d\n",
2175 __func__, buf->frame_id, frame_id);
2176 vfe_dev->buf_mgr->frameId_mismatch_recovery = 1;
2177}
2178
2179static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
2180 struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
2181 struct timeval *time_stamp, uint32_t frame_id)
2182{
2183 int rc;
2184 unsigned long flags;
2185 struct msm_isp_event_data buf_event;
2186 uint32_t stream_idx = stream_info->stream_src;
2187 uint32_t buf_src;
2188 uint8_t drop_frame = 0;
2189 struct msm_isp_bufq *bufq = NULL;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302190 memset(&buf_event, 0, sizeof(buf_event));
2191
2192 if (stream_idx >= VFE_AXI_SRC_MAX) {
2193 pr_err_ratelimited("%s: Invalid stream_idx", __func__);
2194 return -EINVAL;
2195 }
2196
2197 if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX) {
2198 pr_err_ratelimited("%s: Invalid stream index, put buf back to vb2 queue\n",
2199 __func__);
2200 rc = vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
2201 buf->bufq_handle, buf->buf_idx);
2202 return -EINVAL;
2203 }
2204
2205 if (stream_info->stream_type != BURST_STREAM &&
2206 (stream_info->sw_skip.stream_src_mask &
2207 (1 << stream_info->stream_src))) {
2208 /* Hw stream output of this src is requested for drop */
2209 if (stream_info->sw_skip.skip_mode == SKIP_ALL) {
2210 /* drop all buffers */
2211 drop_frame = 1;
2212 } else if (stream_info->sw_skip.skip_mode == SKIP_RANGE &&
2213 (stream_info->sw_skip.min_frame_id <= frame_id &&
2214 stream_info->sw_skip.max_frame_id >= frame_id)) {
2215 drop_frame = 1;
2216 } else if (frame_id > stream_info->sw_skip.max_frame_id) {
2217 spin_lock_irqsave(&stream_info->lock, flags);
2218 memset(&stream_info->sw_skip, 0,
2219 sizeof(struct msm_isp_sw_framskip));
2220 spin_unlock_irqrestore(&stream_info->lock, flags);
2221 }
2222 }
2223
2224 rc = vfe_dev->buf_mgr->ops->get_buf_src(vfe_dev->buf_mgr,
2225 buf->bufq_handle, &buf_src);
2226 if (rc != 0) {
2227 pr_err_ratelimited("%s: Error getting buf_src\n", __func__);
2228 return -EINVAL;
2229 }
2230
2231 if (drop_frame) {
2232 buf->buf_debug.put_state[
2233 buf->buf_debug.put_state_last] =
2234 MSM_ISP_BUFFER_STATE_DROP_SKIP;
2235 buf->buf_debug.put_state_last ^= 1;
2236 if (stream_info->buf_divert)
2237 vfe_dev->buf_mgr->ops->put_buf(
2238 vfe_dev->buf_mgr,
2239 buf->bufq_handle, buf->buf_idx);
2240 else
2241 rc = vfe_dev->buf_mgr->ops->buf_done(
2242 vfe_dev->buf_mgr,
2243 buf->bufq_handle, buf->buf_idx,
2244 time_stamp, frame_id,
2245 stream_info->runtime_output_format);
2246
2247 if (rc == -EFAULT) {
2248 msm_isp_halt_send_error(vfe_dev,
2249 ISP_EVENT_BUF_FATAL_ERROR);
2250 return rc;
2251 }
2252 if (!rc) {
2253 ISP_DBG("%s:%d vfe_id %d Buffer dropped %d\n",
2254 __func__, __LINE__, vfe_dev->pdev->id,
2255 frame_id);
2256 /*
2257 * Return rc which is 0 at this point so that
2258 * we can cfg ping pong and we can continue
2259 * streaming
2260 */
2261 return rc;
2262 }
2263 }
2264
2265 buf_event.frame_id = frame_id;
2266 buf_event.timestamp = *time_stamp;
2267 buf_event.u.buf_done.session_id = stream_info->session_id;
2268 buf_event.u.buf_done.stream_id = stream_info->stream_id;
2269 buf_event.u.buf_done.handle = buf->bufq_handle;
2270 buf_event.u.buf_done.buf_idx = buf->buf_idx;
2271 buf_event.u.buf_done.output_format =
2272 stream_info->runtime_output_format;
2273 if (vfe_dev->fetch_engine_info.is_busy &&
2274 SRC_TO_INTF(stream_info->stream_src) == VFE_PIX_0) {
2275 vfe_dev->fetch_engine_info.is_busy = 0;
2276 }
2277
2278 if (stream_info->buf_divert &&
2279 buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
2280
2281 bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
2282 buf->bufq_handle);
2283 if (!bufq) {
2284 pr_err("%s: Invalid bufq buf_handle %x\n",
2285 __func__, buf->bufq_handle);
2286 return -EINVAL;
2287 }
2288
2289 /* divert native buffers */
2290 vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
2291 buf->bufq_handle, buf->buf_idx, time_stamp,
2292 frame_id);
2293
2294 if ((bufq != NULL) && bufq->buf_type == ISP_SHARE_BUF)
2295 msm_isp_send_event(vfe_dev->common_data->
2296 dual_vfe_res->vfe_dev[ISP_VFE1],
2297 ISP_EVENT_BUF_DIVERT, &buf_event);
2298 else
2299 msm_isp_send_event(vfe_dev,
2300 ISP_EVENT_BUF_DIVERT, &buf_event);
2301 } else {
2302 ISP_DBG("%s: vfe_id %d send buf done buf-id %d bufq %x\n",
2303 __func__, vfe_dev->pdev->id, buf->buf_idx,
2304 buf->bufq_handle);
2305 msm_isp_send_event(vfe_dev, ISP_EVENT_BUF_DONE,
2306 &buf_event);
2307 buf->buf_debug.put_state[
2308 buf->buf_debug.put_state_last] =
2309 MSM_ISP_BUFFER_STATE_PUT_BUF;
2310 buf->buf_debug.put_state_last ^= 1;
2311 rc = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
Meera Gandedd5078e2019-12-31 14:23:36 +05302312 buf->bufq_handle, buf->buf_idx, time_stamp,
2313 frame_id, stream_info->runtime_output_format);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302314 if (rc == -EFAULT) {
2315 msm_isp_halt_send_error(vfe_dev,
2316 ISP_EVENT_BUF_FATAL_ERROR);
2317 return rc;
2318 }
2319 }
2320
2321 return 0;
2322}
2323
2324int msm_isp_drop_frame(struct vfe_device *vfe_dev,
2325 struct msm_vfe_axi_stream *stream_info, struct msm_isp_timestamp *ts,
2326 struct msm_isp_sof_info *sof_info)
2327{
2328 struct msm_isp_buffer *done_buf = NULL;
2329 uint32_t pingpong_status;
2330 unsigned long flags;
2331 struct msm_isp_bufq *bufq = NULL;
2332 uint32_t pingpong_bit;
2333 int vfe_idx;
2334 int rc = -1;
2335
2336 if (!vfe_dev || !stream_info || !ts || !sof_info) {
2337 pr_err("%s %d vfe_dev %pK stream_info %pK ts %pK op_info %pK\n",
2338 __func__, __LINE__, vfe_dev, stream_info, ts,
2339 sof_info);
2340 return -EINVAL;
2341 }
2342 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
2343
2344 pingpong_status =
2345 ~vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
2346
2347 spin_lock_irqsave(&stream_info->lock, flags);
2348 pingpong_bit =
2349 (~(pingpong_status >> stream_info->wm[vfe_idx][0]) & 0x1);
2350 done_buf = stream_info->buf[pingpong_bit];
2351 if (done_buf &&
2352 (stream_info->composite_irq[MSM_ISP_COMP_IRQ_EPOCH] == 0)) {
2353 if ((stream_info->sw_ping_pong_bit != -1) &&
2354 !vfe_dev->reg_updated) {
2355 rc = msm_isp_cfg_ping_pong_address(
2356 stream_info, ~pingpong_status, done_buf);
2357 if (rc < 0) {
2358 ISP_DBG("%s: Error configuring ping_pong\n",
2359 __func__);
2360 bufq = vfe_dev->buf_mgr->ops->get_bufq(
2361 vfe_dev->buf_mgr,
2362 done_buf->bufq_handle);
2363 if (!bufq) {
2364 spin_unlock_irqrestore(
2365 &stream_info->lock,
2366 flags);
2367 pr_err("%s: Invalid bufq buf_handle %x\n",
2368 __func__,
2369 done_buf->bufq_handle);
2370 return -EINVAL;
2371 }
2372 sof_info->reg_update_fail_mask_ext |=
2373 (bufq->bufq_handle & 0xFF);
2374 }
2375 }
2376		/* Avoid dropping the frame and re-issue the ping-pong cfg; */
2377		/* the is_drop_reconfig flag is tracked per ping/pong buffer */
2378 done_buf->is_drop_reconfig = 1;
2379 stream_info->current_framedrop_period = 1;
2380		/* Avoid multiple request frames for a single SOF */
2381 vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame = false;
2382
2383 if (stream_info->current_framedrop_period !=
2384 stream_info->requested_framedrop_period) {
2385 msm_isp_cfg_framedrop_reg(stream_info);
2386 }
2387 }
2388 spin_unlock_irqrestore(&stream_info->lock, flags);
2389
2390	/* if the buf done will not come, we need to process it ourselves */
2391 if (stream_info->activated_framedrop_period ==
2392 MSM_VFE_STREAM_STOP_PERIOD) {
2393		/* no buf done will come */
2394 msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
2395 pingpong_status, ts);
2396 if (done_buf)
2397 done_buf->is_drop_reconfig = 0;
2398 }
2399 return 0;
2400}
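
/*
 * Summary sketch of the drop handling above (as read from this function,
 * not a normative description): the just-completed buffer is re-queued
 * onto the same ping/pong half, marked with is_drop_reconfig so the later
 * buf-done handling knows this was a drop-and-requeue, and the framedrop
 * period is forced to 1 so the WM writes again on the next frame. If
 * framedrop is fully stopped (activated period ==
 * MSM_VFE_STREAM_STOP_PERIOD) no buf done will arrive, so
 * msm_isp_process_axi_irq_stream() is called directly instead.
 */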
2401
2402/**
2403 * msm_isp_input_disable() - Disable the input for given vfe
2404 * @vfe_dev: The vfe device whose input is to be disabled
 * @cmd_type: Stop command type; STOP_IMMEDIATELY disables camif right away
 *	      rather than via the regular DISABLE_CAMIF path
2405 *
2406 * Returns - void
2407 *
2408 * If stream count on an input line is 0 then disable the input
2409 */
2410static void msm_isp_input_disable(struct vfe_device *vfe_dev, int cmd_type)
2411{
2412 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
2413 int stream_count;
2414 int total_stream_count = 0;
2415 int i;
2416 struct msm_vfe_src_info *src_info;
2417 int ext_read =
2418 (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
2419
2420 for (i = 0; i < VFE_SRC_MAX; i++)
2421 total_stream_count += axi_data->src_info[i].stream_count +
2422 axi_data->src_info[i].raw_stream_count;
2423
2424 for (i = 0; i < VFE_SRC_MAX; i++) {
2425 stream_count = axi_data->src_info[i].stream_count +
2426 axi_data->src_info[i].raw_stream_count;
2427 if (stream_count)
2428 continue;
2429 if (axi_data->src_info[i].active == 0)
2430 continue;
2431 /* deactivate the input line */
2432 axi_data->src_info[i].active = 0;
2433 src_info = &axi_data->src_info[i];
2434
2435 if (src_info->dual_hw_type == DUAL_HW_MASTER_SLAVE) {
2436 struct master_slave_resource_info *ms_res =
2437 &vfe_dev->common_data->ms_resource;
2438 unsigned long flags;
2439
2440 spin_lock_irqsave(
2441 &vfe_dev->common_data->common_dev_data_lock,
2442 flags);
2443 if (src_info->dual_hw_ms_info.index ==
2444 ms_res->master_index)
2445 ms_res->master_index = -1;
2446 if (src_info->dual_hw_ms_info.index ==
2447 ms_res->primary_slv_idx)
2448 ms_res->primary_slv_idx = -1;
2449 ms_res->active_src_mask &= ~(1 <<
2450 src_info->dual_hw_ms_info.index);
2451 ms_res->src_sof_mask &= ~(1 <<
2452 src_info->dual_hw_ms_info.index);
2453 ms_res->src_info[src_info->dual_hw_ms_info.index] =
2454 NULL;
2455 ms_res->num_src--;
2456 if (ms_res->num_src == 0)
2457 ms_res->dual_sync_mode = MSM_ISP_DUAL_CAM_ASYNC;
2458 src_info->dual_hw_ms_info.sync_state =
2459 MSM_ISP_DUAL_CAM_ASYNC;
2460 src_info->dual_hw_type = DUAL_NONE;
2461 src_info->dual_hw_ms_info.index = -1;
2462 spin_unlock_irqrestore(
2463 &vfe_dev->common_data->common_dev_data_lock,
2464 flags);
2465 }
2466 if (i != VFE_PIX_0 || ext_read)
2467 continue;
2468 if (total_stream_count == 0 || cmd_type == STOP_IMMEDIATELY)
2469 vfe_dev->hw_info->vfe_ops.core_ops.
2470 update_camif_state(vfe_dev,
2471 DISABLE_CAMIF_IMMEDIATELY);
2472 else
2473 vfe_dev->hw_info->vfe_ops.core_ops.
2474 update_camif_state(vfe_dev,
2475 DISABLE_CAMIF);
2476 }
2477 /*
2478 * halt and reset hardware if all streams are disabled, in this case
2479 * ispif is halted immediately as well
2480 */
2481 if (total_stream_count == 0) {
2482 vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
2483 msm_isp_flush_tasklet(vfe_dev);
2484 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
2485 if (msm_vfe_is_vfe48(vfe_dev))
2486 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
2487 0, 1);
2488 vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
2489 }
2490
2491}
2492
2493/**
2494 * msm_isp_input_enable() - Enable the input for given vfe
2495 * @vfe_dev: The vfe device whose input is to be enabled
 * @sync_frame_id_src: If set, RDI interfaces start with the PIX frame id
2496 *
2497 * Returns - void
2498 *
2499 * Enable the input line if it is not already enabled
2500 */
2501static void msm_isp_input_enable(struct vfe_device *vfe_dev,
2502 int sync_frame_id_src)
2503{
2504 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
2505 int ext_read =
2506 (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
2507 int stream_count;
2508 int i;
2509
2510 for (i = 0; i < VFE_SRC_MAX; i++) {
2511 stream_count = axi_data->src_info[i].stream_count +
2512 axi_data->src_info[i].raw_stream_count;
2513 if (stream_count == 0)
2514 continue;
2515 if (axi_data->src_info[i].active)
2516 continue;
2517 /* activate the input since it is deactivated */
2518 axi_data->src_info[i].frame_id = 0;
Srikanth Uyyala03a06cb2019-03-27 14:21:07 +05302519 vfe_dev->irq_sof_id = 0;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302520 if (axi_data->src_info[i].input_mux != EXTERNAL_READ)
2521 axi_data->src_info[i].active = 1;
2522 if (i >= VFE_RAW_0 && sync_frame_id_src) {
2523 /*
2524			 * In case PIX and RDI streams are part
2525			 * of the same session, this ensures the
2526			 * RDI stream has the same frame id
2527			 * as the PIX stream
2528 */
2529 axi_data->src_info[i].frame_id =
2530 axi_data->src_info[VFE_PIX_0].frame_id;
2531 }
2532		/* on start, reset the overflow state and cfg the UB for this intf */
2533 vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev, i);
2534 atomic_set(&vfe_dev->error_info.overflow_state,
2535 NO_OVERFLOW);
2536 if (i != VFE_PIX_0 || ext_read)
2537 continue;
2538 /* for camif input the camif needs enabling */
2539 vfe_dev->hw_info->vfe_ops.core_ops.
2540 update_camif_state(vfe_dev, ENABLE_CAMIF);
2541 }
2542}
2543
2544/**
2545 * msm_isp_update_intf_stream_cnt() - Update the stream count in axi interface
2546 * @stream_info: The stream that is either being enabled/disabled
2547 * @enable: 0 means stream is being disabled, else enabled
2548 *
2549 * Returns - void
2550 */
2551static void msm_isp_update_intf_stream_cnt(
2552 struct msm_vfe_axi_stream *stream_info,
2553 int enable)
2554{
2555 int i;
2556
2557 switch (stream_info->stream_src) {
2558 case PIX_ENCODER:
2559 case PIX_VIEWFINDER:
2560 case PIX_VIDEO:
2561 case IDEAL_RAW:
2562 case RDI_INTF_0:
2563 case RDI_INTF_1:
2564 case RDI_INTF_2:
2565 for (i = 0; i < stream_info->num_isp; i++) {
2566 if (enable)
2567 stream_info->vfe_dev[i]->axi_data.src_info[
2568 SRC_TO_INTF(stream_info->stream_src)].
2569 stream_count++;
2570 else
2571 stream_info->vfe_dev[i]->axi_data.src_info[
2572 SRC_TO_INTF(stream_info->stream_src)].
2573 stream_count--;
2574 }
2575 break;
2576 case CAMIF_RAW:
2577 for (i = 0; i < stream_info->num_isp; i++) {
2578 if (enable)
2579 stream_info->vfe_dev[i]->axi_data.src_info[
2580 SRC_TO_INTF(stream_info->stream_src)].
2581 raw_stream_count++;
2582 else
2583 stream_info->vfe_dev[i]->axi_data.src_info[
2584 SRC_TO_INTF(stream_info->stream_src)].
2585 raw_stream_count--;
2586 }
2587 break;
2588 default:
2589		WARN(1, "Invalid stream src %d\n", stream_info->stream_src);
2590 }
2591}
2592
2593/* Factor in Q2 format */
2594#define ISP_DEFAULT_FORMAT_FACTOR 6
2595#define ISP_BUS_UTILIZATION_FACTOR 6
2596static int msm_isp_update_stream_bandwidth(
2597 struct msm_vfe_axi_stream *stream_info, int enable)
2598{
2599 int i, rc = 0;
2600 uint64_t total_bandwidth = 0;
2601 int vfe_idx;
2602 struct vfe_device *vfe_dev;
2603
2604 for (i = 0; i < stream_info->num_isp; i++) {
2605 vfe_dev = stream_info->vfe_dev[i];
2606 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
2607 stream_info);
2608 if (enable) {
2609 total_bandwidth =
2610 vfe_dev->total_bandwidth +
2611 stream_info->bandwidth[vfe_idx];
2612 } else {
2613 total_bandwidth = vfe_dev->total_bandwidth -
2614 stream_info->bandwidth[vfe_idx];
2615 }
2616 vfe_dev->total_bandwidth = total_bandwidth;
2617 rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
2618 (total_bandwidth + vfe_dev->hw_info->min_ab),
2619 (total_bandwidth + vfe_dev->hw_info->min_ib));
2620
2621 if (rc < 0)
2622 pr_err("%s: update failed rc %d stream src %d vfe dev %d\n",
2623 __func__, rc, stream_info->stream_src,
2624 vfe_dev->pdev->id);
2625 }
2626 return rc;
2627}
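
/*
 * Illustrative vote calculation (numbers assumed, not from the driver):
 * if a VFE already has total_bandwidth = 800 MB/s and the stream being
 * enabled contributes stream_info->bandwidth[vfe_idx] = 200 MB/s, then
 * msm_isp_update_bandwidth() is asked for
 *
 *	ab = 1000 MB/s + hw_info->min_ab
 *	ib = 1000 MB/s + hw_info->min_ib
 *
 * on that VFE's bus client; disabling the stream subtracts the same
 * contribution again.
 */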
2628
2629int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg)
2630{
2631 int i, rc = 0;
2632 uint32_t intf;
2633 unsigned long flags;
2634 struct msm_vfe_axi_stream *stream_info;
2635 struct msm_vfe_dual_lpm_mode *ab_ib_vote = NULL;
2636
2637 ab_ib_vote = (struct msm_vfe_dual_lpm_mode *)arg;
2638 if (!ab_ib_vote) {
2639 pr_err("%s: ab_ib_vote is NULL !!!\n", __func__);
2640 rc = -1;
2641 return rc;
2642 }
2643 if (ab_ib_vote->num_src >= VFE_AXI_SRC_MAX) {
2644 pr_err("%s: ab_ib_vote num_src is exceeding limit\n",
2645 __func__);
2646 rc = -1;
2647 return rc;
2648 }
2655 if (ab_ib_vote->lpm_mode) {
2656 for (i = 0; i < ab_ib_vote->num_src; i++) {
2657 stream_info =
2658 msm_isp_get_stream_common_data(vfe_dev,
2659 ab_ib_vote->stream_src[i]);
2660 if (stream_info == NULL)
2661 continue;
2662 /* loop all stream on current session */
2663 spin_lock_irqsave(&stream_info->lock, flags);
2664 intf = SRC_TO_INTF(stream_info->stream_src);
2665 vfe_dev->axi_data.src_info[intf].lpm =
2666 ab_ib_vote->lpm_mode;
2667 if (stream_info->lpm_mode ||
2668 stream_info->state == INACTIVE) {
2669 spin_unlock_irqrestore(&stream_info->lock,
2670 flags);
2671 continue;
2672 }
2673 stream_info->lpm_mode = ab_ib_vote->lpm_mode;
2674 spin_unlock_irqrestore(&stream_info->lock, flags);
2675 msm_isp_update_stream_bandwidth(stream_info, 0);
2676 }
2677 } else {
2678 for (i = 0; i < ab_ib_vote->num_src; i++) {
2679 stream_info =
2680 msm_isp_get_stream_common_data(vfe_dev,
2681 ab_ib_vote->stream_src[i]);
2682 if (stream_info == NULL)
2683 continue;
2684 spin_lock_irqsave(&stream_info->lock, flags);
2685 intf = SRC_TO_INTF(stream_info->stream_src);
2686 vfe_dev->axi_data.src_info[intf].lpm =
2687 ab_ib_vote->lpm_mode;
2688 if (stream_info->lpm_mode == 0 ||
2689 stream_info->state == INACTIVE) {
2690 spin_unlock_irqrestore(&stream_info->lock,
2691 flags);
2692 continue;
2693 }
2694 stream_info->lpm_mode = 0;
2695 spin_unlock_irqrestore(&stream_info->lock, flags);
2696 msm_isp_update_stream_bandwidth(stream_info, 1);
2697 }
2698 }
2699 return rc;
2700}
2701
2702static int msm_isp_init_stream_ping_pong_reg(
2703 struct msm_vfe_axi_stream *stream_info)
2704{
2705 int rc = 0;
2706
2707	/* Set address for both PING & PONG registers */
2708 rc = msm_isp_cfg_ping_pong_address(
2709 stream_info, VFE_PING_FLAG, NULL);
2710	/* No buffer available on start is not an error */
2711 if (rc == -ENOMEM && stream_info->stream_type != BURST_STREAM)
2712 return 0;
2713 if (rc < 0) {
2714 pr_err("%s: No free buffer for ping\n",
2715 __func__);
2716 return rc;
2717 }
2718 if (stream_info->stream_type != BURST_STREAM ||
2719 stream_info->runtime_num_burst_capture > 1) {
2720 rc = msm_isp_cfg_ping_pong_address(
2721 stream_info, VFE_PONG_FLAG, NULL);
2722		/* No buffer available on start is not an error */
2723 if (rc == -ENOMEM)
2724 return 0;
2725 }
2726
2727 if (rc < 0) {
2728 pr_err("%s: No free buffer for pong\n",
2729 __func__);
2730 return rc;
2731 }
2732
2733 return rc;
2734}
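
/*
 * Minimal usage sketch (as done by the start/restart paths in this file):
 *
 *	msm_isp_cfg_ping_pong_address(stream_info, VFE_PING_FLAG, NULL);
 *	msm_isp_cfg_ping_pong_address(stream_info, VFE_PONG_FLAG, NULL);
 *
 * PONG is skipped for a burst stream that captures a single frame, and
 * -ENOMEM (no buffer queued yet) is generally tolerated because the
 * scratch buffer is programmed until a real buffer becomes available.
 */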
2735
2736static void msm_isp_get_stream_wm_mask(
2737 struct vfe_device *vfe_dev,
2738 struct msm_vfe_axi_stream *stream_info,
2739 uint32_t *wm_reload_mask)
2740{
2741 int i;
2742 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
2743
2744 for (i = 0; i < stream_info->num_planes; i++)
2745 *wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
2746}
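
/*
 * Example mask composition (assumed WM numbers): a two-plane stream
 * mapped to write masters 4 and 5 on this VFE yields
 *
 *	*wm_reload_mask |= (1 << 4) | (1 << 5);    i.e. 0x30
 *
 * which is later passed to axi_ops.reload_wm() so that both masters latch
 * the newly programmed ping/pong addresses together.
 */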
2747
2748int msm_isp_axi_halt(struct vfe_device *vfe_dev,
2749 struct msm_vfe_axi_halt_cmd *halt_cmd)
2750{
2751 int rc = 0;
2752
2753 if (atomic_read(&vfe_dev->error_info.overflow_state) ==
2754 OVERFLOW_DETECTED)
2755 pr_err("%s: VFE%d Bus overflow detected: start recovery!\n",
2756 __func__, vfe_dev->pdev->id);
2757
2758 /* take care of pending items in tasklet before halt */
2759 msm_isp_flush_tasklet(vfe_dev);
2760
2761 if (halt_cmd->stop_camif) {
2762 vfe_dev->hw_info->vfe_ops.core_ops.
2763 update_camif_state(vfe_dev,
2764 DISABLE_CAMIF_IMMEDIATELY);
2765 }
2766 rc |= vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
2767 halt_cmd->blocking_halt);
2768
2769 return rc;
2770}
2771
2772int msm_isp_axi_reset(struct vfe_device *vfe_dev,
2773 struct msm_vfe_axi_reset_cmd *reset_cmd)
2774{
2775 int rc = 0, i, k;
2776 struct msm_vfe_axi_stream *stream_info;
2777 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
2778 uint32_t bufq_handle = 0, bufq_id = 0;
2779 struct msm_isp_timestamp timestamp;
2780 struct msm_vfe_frame_request_queue *queue_req;
2781 unsigned long flags;
Ramesh Vc97c71e2019-04-17 10:50:13 +05302782 uint32_t pingpong_status;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302783 int vfe_idx;
Ramesh Vc97c71e2019-04-17 10:50:13 +05302784 uint32_t pingpong_bit = 0;
2785 uint32_t frame_id = 0;
2786 struct timeval *time_stamp;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302787
2788 if (!reset_cmd) {
2789 pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
2790 rc = -1;
2791 return rc;
2792 }
2793
2794 msm_isp_get_timestamp(&timestamp, vfe_dev);
Ramesh Vc97c71e2019-04-17 10:50:13 +05302795 time_stamp = &timestamp.buf_time;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302796
2797 for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
2798 stream_info = msm_isp_get_stream_common_data(
2799 vfe_dev, i);
2800 if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
2801 rc = -1;
2802 pr_err("%s invalid stream src = %d\n",
2803 __func__,
2804 stream_info->stream_src);
2805 break;
2806 }
2807 if (stream_info->state == AVAILABLE ||
2808 stream_info->state == INACTIVE)
2809 continue;
2810
2811 /* handle dual stream on ISP_VFE1 turn */
2812 if (stream_info->num_isp > 1 &&
2813 vfe_dev->pdev->id == ISP_VFE0)
2814 continue;
2815
2816 /* set ping pong to scratch before flush */
2817 spin_lock_irqsave(&stream_info->lock, flags);
Ramesh Vc97c71e2019-04-17 10:50:13 +05302818 frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
2819 if (stream_info->controllable_output &&
2820 stream_info->undelivered_request_cnt > 0) {
2821 pingpong_status = VFE_PING_FLAG;
2822 pingpong_bit = (~(pingpong_status >>
2823 stream_info->wm[0][0]) & 0x1);
2824 if (stream_info->buf[pingpong_bit] != NULL) {
2825 msm_isp_process_done_buf(vfe_dev, stream_info,
2826 stream_info->buf[pingpong_bit],
2827 time_stamp,
2828 frame_id);
2829 }
2830 pingpong_status = VFE_PONG_FLAG;
2831 pingpong_bit = (~(pingpong_status >>
2832 stream_info->wm[0][0]) & 0x1);
2833 if (stream_info->buf[pingpong_bit] != NULL) {
2834 msm_isp_process_done_buf(vfe_dev, stream_info,
2835 stream_info->buf[pingpong_bit],
2836 time_stamp,
2837 frame_id);
2838 }
2839 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302840 msm_isp_cfg_stream_scratch(stream_info,
2841 VFE_PING_FLAG);
2842 msm_isp_cfg_stream_scratch(stream_info,
2843 VFE_PONG_FLAG);
2844 stream_info->undelivered_request_cnt = 0;
2845 spin_unlock_irqrestore(&stream_info->lock,
2846 flags);
2847 while (!list_empty(&stream_info->request_q)) {
2848 queue_req = list_first_entry_or_null(
2849 &stream_info->request_q,
2850 struct msm_vfe_frame_request_queue, list);
2851 if (queue_req) {
2852 queue_req->cmd_used = 0;
2853 list_del(&queue_req->list);
2854 }
2855 }
2856 for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX;
2857 bufq_id++) {
2858 bufq_handle = stream_info->bufq_handle[bufq_id];
2859 if (!bufq_handle)
2860 continue;
2861 rc = vfe_dev->buf_mgr->ops->flush_buf(
2862 vfe_dev->buf_mgr,
2863 bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
2864 &timestamp.buf_time,
2865 reset_cmd->frame_id);
2866 if (rc == -EFAULT) {
2867 msm_isp_halt_send_error(vfe_dev,
2868 ISP_EVENT_BUF_FATAL_ERROR);
2869 return rc;
2870 }
2871 }
2872
2873 for (k = 0; k < stream_info->num_isp; k++) {
2874 struct vfe_device *temp_vfe_dev =
2875 stream_info->vfe_dev[k];
2876 vfe_idx = msm_isp_get_vfe_idx_for_stream(
2877 temp_vfe_dev, stream_info);
2878 if (stream_info->num_planes > 1) {
2879 temp_vfe_dev->hw_info->vfe_ops.axi_ops.
2880 cfg_comp_mask(temp_vfe_dev,
2881 stream_info);
2882 } else {
2883 temp_vfe_dev->hw_info->vfe_ops.axi_ops.
2884 cfg_wm_irq_mask(temp_vfe_dev,
2885 stream_info);
2886 }
2887 axi_data = &temp_vfe_dev->axi_data;
2888 axi_data->src_info[SRC_TO_INTF(stream_info->
2889 stream_src)].frame_id =
2890 reset_cmd->frame_id;
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05302891 temp_vfe_dev->irq_sof_id = reset_cmd->frame_id;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05302892 }
2893 msm_isp_reset_burst_count_and_frame_drop(
2894 vfe_dev, stream_info);
2895 }
2896
2897 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
2898 0, reset_cmd->blocking);
2899 /*
2900	 * call reset a second time for vfe48; calling it
2901	 * only once causes a bus error on camif enable
2902 */
2903 if (msm_vfe_is_vfe48(vfe_dev))
2904 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
2905 0, reset_cmd->blocking);
2906
2907 if (rc < 0)
2908		pr_err("%s: Error! reset hw timed out\n", __func__);
2909
2910 return 0;
2911}
2912
2913int msm_isp_axi_restart(struct vfe_device *vfe_dev,
2914 struct msm_vfe_axi_restart_cmd *restart_cmd)
2915{
2916 int rc = 0, i, k, j;
2917 struct msm_vfe_axi_stream *stream_info;
2918 uint32_t wm_reload_mask[MAX_VFE] = {0, 0};
2919 unsigned long flags;
2920 int vfe_idx;
2921
2922 vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
2923 for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
2924 stream_info = msm_isp_get_stream_common_data(
2925 vfe_dev, i);
2926 if (stream_info->state == AVAILABLE ||
2927 stream_info->state == INACTIVE)
2928 continue;
2929 /* handle dual stream on ISP_VFE1 turn */
2930 if (stream_info->num_isp > 1 &&
2931 vfe_dev->pdev->id == ISP_VFE0)
2932 continue;
2933 spin_lock_irqsave(&stream_info->lock, flags);
2934 for (j = 0; j < MSM_ISP_COMP_IRQ_MAX; j++)
2935 stream_info->composite_irq[j] = 0;
2936 for (k = 0; k < stream_info->num_isp; k++) {
2937 struct vfe_device *temp_vfe_dev =
2938 stream_info->vfe_dev[k];
2939 vfe_idx = msm_isp_get_vfe_idx_for_stream(
2940 temp_vfe_dev, stream_info);
2941 for (j = 0; j < stream_info->num_planes; j++)
2942 temp_vfe_dev->hw_info->vfe_ops.axi_ops.
2943 enable_wm(
2944 temp_vfe_dev->vfe_base,
2945 stream_info->wm[vfe_idx][j], 1);
2946 msm_isp_get_stream_wm_mask(temp_vfe_dev, stream_info,
2947 &wm_reload_mask[temp_vfe_dev->pdev->id]);
2948 }
2949 msm_isp_init_stream_ping_pong_reg(stream_info);
2950 spin_unlock_irqrestore(&stream_info->lock, flags);
2951 }
2952
2953 for (k = 0; k < MAX_VFE; k++) {
2954 struct vfe_device *temp_vfe_dev =
2955 vfe_dev->common_data->dual_vfe_res->vfe_dev[k];
2956 if (wm_reload_mask[k])
2957 temp_vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(
2958 temp_vfe_dev,
2959 temp_vfe_dev->vfe_base, wm_reload_mask[k]);
2960 }
2961
2962 vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
2963 restart_cmd->enable_camif);
2964
2965 return rc;
2966}
2967
2968static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev_ioctl,
2969 struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
2970 uint8_t cgc_override)
2971{
2972 int i = 0, j = 0;
2973 struct msm_vfe_axi_stream *stream_info;
2974 int k;
2975 struct vfe_device *vfe_dev;
2976 int vfe_idx;
2977
2978 if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
2979 return -EINVAL;
2980
2981 for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
2982 if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
2983 VFE_AXI_SRC_MAX) {
2984 return -EINVAL;
2985 }
2986 stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
2987 HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
2988 if (!stream_info) {
2989			pr_err("%s: stream_info is NULL\n", __func__);
2990 return -EINVAL;
2991 }
2992 for (j = 0; j < stream_info->num_planes; j++) {
2993 for (k = 0; k < stream_info->num_isp; k++) {
2994 vfe_dev = stream_info->vfe_dev[k];
2995 if (!vfe_dev->hw_info->vfe_ops.axi_ops.
2996 update_cgc_override)
2997 continue;
2998 vfe_idx = msm_isp_get_vfe_idx_for_stream(
2999 vfe_dev, stream_info);
3000 vfe_dev->hw_info->vfe_ops.axi_ops.
3001 update_cgc_override(vfe_dev,
3002 stream_info->wm[vfe_idx][j],
3003 cgc_override);
3004 }
3005 }
3006 }
3007 return 0;
3008}
3009
3010/**
3011 * msm_isp_axi_wait_for_stream_cfg_done() - Wait for a stream completion
3012 * @stream_info: The stream to wait on
3013 * @active: If 0, wait for the stream to be INACTIVE, else wait for ACTIVE
3014 *
3015 * Returns - 0 on success else error code
3016 */
3017static int msm_isp_axi_wait_for_stream_cfg_done(
3018 struct msm_vfe_axi_stream *stream_info, int active)
3019{
3020 int rc = -1;
3021 unsigned long flags;
3022
3023 /* No need to wait if stream is already in required state */
3024 spin_lock_irqsave(&stream_info->lock, flags);
3025 if (active && ACTIVE == stream_info->state)
3026 rc = 0;
3027 if (!active && INACTIVE == stream_info->state)
3028 rc = 0;
3029 spin_unlock_irqrestore(&stream_info->lock, flags);
3030 if (rc == 0)
3031 return rc;
3032
3033 rc = wait_for_completion_timeout(
3034 active ? &stream_info->active_comp :
3035 &stream_info->inactive_comp,
3036 msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
3037
3038 if (rc <= 0) {
3039 rc = rc ? rc : -ETIMEDOUT;
3040 pr_err("%s: wait for stream %x/%x state %d config failed %d\n",
3041 __func__,
3042 stream_info->stream_id,
3043 stream_info->stream_src,
3044 stream_info->state,
3045 rc);
3046 rc = -EINVAL;
3047 } else {
3048 rc = 0;
3049 }
3050 return rc;
3051}
3052
3053/**
3054 * msm_isp_axi_wait_for_streams() - Wait for completion of a number of streams
3055 * @streams: The streams to wait on
3056 * @num_stream: Number of streams to wait on
3057 * @active: If 0, wait for the streams to be INACTIVE, else wait for ACTIVE
3058 *
3059 * Returns - 0 on success else error code
3060 */
3061static int msm_isp_axi_wait_for_streams(struct msm_vfe_axi_stream **streams,
3062 int num_stream, int active)
3063{
3064 int i;
3065 int rc = 0;
3066 struct msm_vfe_axi_stream *stream_info;
3067
3068 for (i = 0; i < num_stream; i++) {
3069 stream_info = streams[i];
3070 rc |= msm_isp_axi_wait_for_stream_cfg_done(stream_info, active);
3071 }
3072 return rc;
3073}
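
/*
 * Usage sketch (as used by the start/stop paths below): after queueing
 * state changes the caller batches the affected streams and waits, e.g.
 *
 *	rc = msm_isp_axi_wait_for_streams(streams, num_streams, 1);
 *
 * which waits up to VFE_MAX_CFG_TIMEOUT ms per stream for the ACTIVE
 * completion; passing 0 waits for INACTIVE instead.
 */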
3074
3075static int __msm_isp_check_stream_state(struct msm_vfe_axi_stream *stream_info,
3076 int cmd)
3077{
3078 switch (stream_info->state) {
3079 case AVAILABLE:
3080 return -EINVAL;
3081 case PAUSING:
3082 case RESUMING:
3083 case RESUME_PENDING:
3084 case ACTIVE:
3085 case PAUSED:
3086 if (cmd != 0)
3087 return -EALREADY;
3088 break;
3089 case INACTIVE:
3090 if (cmd == 0)
3091 return -EALREADY;
3092 break;
3093 /*
3094 * stream cannot be in following states since we always
3095 * wait in ioctl for stream to be active or inactive
3096 */
3097 case UPDATING:
3098 case START_PENDING:
3099 case STARTING:
3100 case STOPPING:
3101 case STOP_PENDING:
3102 case PAUSE_PENDING:
3103 default:
3104 WARN(1, "Invalid state %d\n", stream_info->state);
3105 }
3106 return 0;
3107}
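
/*
 * Summary of the state check above (illustrative):
 *
 *	cmd != 0 (start): AVAILABLE -> -EINVAL, INACTIVE -> 0,
 *	                  ACTIVE/PAUSED/PAUSING/RESUMING/RESUME_PENDING
 *	                  -> -EALREADY
 *	cmd == 0 (stop):  AVAILABLE -> -EINVAL, INACTIVE -> -EALREADY,
 *	                  ACTIVE and the pause/resume states -> 0
 *
 * Transitional states (STARTING, STOPPING, START/STOP/PAUSE_PENDING,
 * UPDATING) hit the WARN because the ioctl path waits for streams to
 * settle before issuing a new start/stop.
 */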
3108
3109
3110static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
3111 struct msm_vfe_axi_stream **streams, int num_streams, int cmd_type)
3112{
3113 int i;
3114 struct msm_vfe_axi_shared_data *axi_data;
3115 struct msm_isp_timestamp timestamp;
3116 uint32_t bufq_id = 0, bufq_handle = 0;
3117 struct msm_vfe_axi_stream *stream_info;
3118 unsigned long flags;
3119 uint32_t intf;
3120 int rc;
3121 struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
3122 int k;
3123
3124 msm_isp_get_timestamp(&timestamp, vfe_dev);
3125
3126 for (i = 0; i < num_streams; i++) {
3127 stream_info = streams[i];
3128 msm_isp_update_intf_stream_cnt(stream_info, 0);
3129 for (k = 0; k < stream_info->num_isp; k++) {
3130 vfe_dev = stream_info->vfe_dev[k];
3131 update_vfes[vfe_dev->pdev->id] = vfe_dev;
3132 }
3133 }
3134 for (k = 0; k < MAX_VFE; k++) {
3135 if (!update_vfes[k])
3136 continue;
3137 msm_isp_input_disable(update_vfes[k], cmd_type);
3138 }
3139
3140 for (i = 0; i < num_streams; i++) {
3141 stream_info = streams[i];
3142 spin_lock_irqsave(&stream_info->lock, flags);
3143 /*
3144		 * Since we can get here from the start-axi-stream error path,
3145		 * the stream may be in an intermediate state like
3146		 * STARTING/START_PENDING; force the stream out of the
3147		 * intermediate state so it can be made INACTIVE. The
3148		 * intermediate states update bookkeeping variables, so it is
3149		 * better to go through those state transitions than to force
3150		 * the stream directly to INACTIVE.
3151 */
3152 memset(&stream_info->sw_skip, 0,
3153 sizeof(struct msm_isp_sw_framskip));
3154 intf = SRC_TO_INTF(stream_info->stream_src);
3155 if (stream_info->lpm_mode == 0 &&
3156 stream_info->state != PAUSED) {
3157 while (stream_info->state != ACTIVE)
3158 __msm_isp_axi_stream_update(stream_info,
3159 &timestamp);
3160 }
3161 msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
3162 msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
3163 stream_info->undelivered_request_cnt = 0;
Lokesh Kumar Aakulu2a8a9932019-03-11 17:27:09 +05303164 if (stream_info->controllable_output &&
3165 stream_info->pending_buf_info.is_buf_done_pending) {
3166 msm_isp_free_pending_buffer(vfe_dev, stream_info,
3167 &timestamp);
3168 stream_info->pending_buf_info.is_buf_done_pending = 0;
3169 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303170 for (k = 0; k < stream_info->num_isp; k++) {
3171 vfe_dev = stream_info->vfe_dev[k];
3172 if (stream_info->num_planes > 1)
3173 vfe_dev->hw_info->vfe_ops.axi_ops.
3174 clear_comp_mask(vfe_dev, stream_info);
3175 else
3176 vfe_dev->hw_info->vfe_ops.axi_ops.
3177 clear_wm_irq_mask(vfe_dev, stream_info);
3178 }
3179 init_completion(&stream_info->inactive_comp);
3180 stream_info->state = STOP_PENDING;
3181 if (stream_info->lpm_mode ||
3182 stream_info->state == PAUSED) {
3183 /* don't wait for reg update */
3184 while (stream_info->state != INACTIVE)
3185 __msm_isp_axi_stream_update(stream_info,
3186 &timestamp);
3187 }
3188 spin_unlock_irqrestore(&stream_info->lock, flags);
3189 }
3190
3191 for (k = 0; k < MAX_VFE; k++) {
3192 if (!update_vfes[k])
3193 continue;
3194 vfe_dev = update_vfes[k];
3195 /* make sure all stats are stopped if camif is stopped */
3196 if (vfe_dev->axi_data.src_info[VFE_PIX_0].active == 0)
3197 msm_isp_stop_all_stats_stream(vfe_dev);
3198 }
3199
3200 for (i = 0; i < num_streams; i++) {
3201 stream_info = streams[i];
3202 spin_lock_irqsave(&stream_info->lock, flags);
3203 intf = SRC_TO_INTF(stream_info->stream_src);
3204 if (((stream_info->stream_type == BURST_STREAM) &&
3205 stream_info->runtime_num_burst_capture == 0) ||
3206 (stream_info->vfe_dev[0]->axi_data.src_info[intf].
3207 active == 0)) {
3208 while (stream_info->state != INACTIVE)
3209 __msm_isp_axi_stream_update(
3210 stream_info, &timestamp);
3211 }
3212 spin_unlock_irqrestore(&stream_info->lock, flags);
3213 }
3214
3215 rc = msm_isp_axi_wait_for_streams(streams, num_streams, 0);
3216 if (rc) {
3217 pr_err("%s: wait for stream comp failed, retry...\n", __func__);
3218 for (i = 0; i < num_streams; i++) {
3219 stream_info = streams[i];
3220 if (stream_info->state == INACTIVE)
3221 continue;
3222 spin_lock_irqsave(&stream_info->lock, flags);
3223 __msm_isp_axi_stream_update(stream_info,
3224 &timestamp);
3225 spin_unlock_irqrestore(&stream_info->lock, flags);
3226 }
3227 rc = msm_isp_axi_wait_for_streams(streams, num_streams, 0);
3228 if (rc) {
3229 pr_err("%s: wait for stream comp failed, force streams to inactive\n",
3230 __func__);
3231 for (i = 0; i < num_streams; i++) {
3232 stream_info = streams[i];
3233 if (stream_info->state == INACTIVE)
3234 continue;
3235 spin_lock_irqsave(&stream_info->lock, flags);
3236 while (stream_info->state != INACTIVE)
3237 __msm_isp_axi_stream_update(
3238 stream_info, &timestamp);
3239 spin_unlock_irqrestore(&stream_info->lock,
3240 flags);
3241 }
3242 }
3243 }
3244 /* clear buffers that are dequeued */
3245 for (i = 0; i < num_streams; i++) {
3246 stream_info = streams[i];
3247 if (stream_info->lpm_mode == 0)
3248 msm_isp_update_stream_bandwidth(stream_info, 0);
3249 for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
3250 bufq_handle = stream_info->bufq_handle[bufq_id];
3251 if (!bufq_handle)
3252 continue;
3253 vfe_dev = stream_info->vfe_dev[0];
3254 rc = vfe_dev->buf_mgr->ops->flush_buf(
3255 vfe_dev->buf_mgr,
3256 bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
3257 &timestamp.buf_time, 0);
3258 if (rc == -EFAULT)
3259 msm_isp_halt_send_error(vfe_dev,
3260 ISP_EVENT_BUF_FATAL_ERROR);
3261 }
3262 }
3263
3264 for (i = 0; i < num_streams; i++) {
3265 stream_info = streams[i];
3266 intf = SRC_TO_INTF(stream_info->stream_src);
3267 for (k = 0; k < stream_info->num_isp; k++) {
3268 vfe_dev = stream_info->vfe_dev[k];
3269 axi_data = &vfe_dev->axi_data;
3270 if (axi_data->src_info[intf].stream_count == 0)
3271 vfe_dev->reg_update_requested &=
3272 ~(BIT(intf));
3273 }
3274 }
3275}
3276
3277static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
3278 struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
3279{
3280 int i, rc = 0;
3281 uint8_t src_state;
3282 uint32_t wm_reload_mask[MAX_VFE] = {0, 0};
3283 struct msm_vfe_axi_stream *stream_info;
3284 uint32_t src_mask = 0;
3285 unsigned long flags;
3286 struct msm_vfe_axi_stream *streams[MAX_NUM_STREAM];
3287 int num_streams = 0;
3288 struct msm_isp_timestamp timestamp;
3289 struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
3290 int k;
3291 struct vfe_device *vfe_dev;
3292 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev_ioctl->axi_data;
3293 uint32_t intf;
3294
3295 if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
3296 return -EINVAL;
3297
3298 msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
Meera Gande044d53f2018-04-13 16:19:34 +05303299 mutex_lock(&vfe_dev_ioctl->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303300 for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
3301 if (stream_cfg_cmd->stream_handle[i] == 0)
3302 continue;
3303 stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
3304 HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
3305 if (!stream_info) {
3306			pr_err("%s: stream_info is NULL\n", __func__);
Meera Gande044d53f2018-04-13 16:19:34 +05303307 mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303308 return -EINVAL;
3309 }
3310 if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
3311 src_state = axi_data->src_info[
3312 SRC_TO_INTF(stream_info->stream_src)].active;
3313
3314 else {
3315 ISP_DBG("%s: invalid src info index\n", __func__);
3316 rc = -EINVAL;
Meera Gande044d53f2018-04-13 16:19:34 +05303317 mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303318 goto error;
3319 }
3320 spin_lock_irqsave(&stream_info->lock, flags);
3321 rc = __msm_isp_check_stream_state(stream_info, 1);
3322 if (-EALREADY == rc) {
3323 rc = 0;
3324 spin_unlock_irqrestore(&stream_info->lock, flags);
3325 continue;
3326 }
3327 if (rc) {
3328 spin_unlock_irqrestore(&stream_info->lock, flags);
Meera Gande044d53f2018-04-13 16:19:34 +05303329 mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303330 goto error;
3331 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303332 msm_isp_calculate_bandwidth(stream_info);
3333 for (k = 0; k < stream_info->num_isp; k++) {
3334 msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
3335 stream_info, &wm_reload_mask[
3336 stream_info->vfe_dev[k]->pdev->id]);
3337 src_state = stream_info->vfe_dev[k]->axi_data.src_info[
3338 SRC_TO_INTF(stream_info->stream_src)].active;
3339 if (update_vfes[stream_info->vfe_dev[k]->pdev->id])
3340 continue;
3341 update_vfes[stream_info->vfe_dev[k]->pdev->id] =
3342 stream_info->vfe_dev[k];
3343 }
3344 msm_isp_reset_framedrop(vfe_dev_ioctl, stream_info);
3345 rc = msm_isp_init_stream_ping_pong_reg(stream_info);
3346 if (rc < 0) {
Ramesh Vc97c71e2019-04-17 10:50:13 +05303347 pr_err("%s: No buffer for stream%x\n", __func__,
3348 stream_info->stream_id);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303349 spin_unlock_irqrestore(&stream_info->lock, flags);
Meera Gande044d53f2018-04-13 16:19:34 +05303350 mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303351 goto error;
3352 }
3353 for (k = 0; k < stream_info->num_isp; k++) {
3354 vfe_dev = stream_info->vfe_dev[k];
3355 if (stream_info->num_planes > 1) {
3356 vfe_dev->hw_info->vfe_ops.axi_ops.
3357 cfg_comp_mask(vfe_dev, stream_info);
3358 } else {
3359 vfe_dev->hw_info->vfe_ops.axi_ops.
3360 cfg_wm_irq_mask(vfe_dev, stream_info);
3361 }
3362 }
3363 intf = SRC_TO_INTF(stream_info->stream_src);
3364 stream_info->lpm_mode = vfe_dev_ioctl->
3365 axi_data.src_info[intf].lpm;
3366 if (stream_info->lpm_mode == 0) {
3367 spin_unlock_irqrestore(&stream_info->lock, flags);
3368 msm_isp_update_stream_bandwidth(stream_info, 1);
3369 spin_lock_irqsave(&stream_info->lock, flags);
3370 }
3371 init_completion(&stream_info->active_comp);
3372 stream_info->state = START_PENDING;
3373 msm_isp_update_intf_stream_cnt(stream_info, 1);
3374
3375 ISP_DBG("%s, Stream 0x%x src_state %d on vfe %d\n", __func__,
3376 stream_info->stream_src, src_state,
3377 vfe_dev_ioctl->pdev->id);
3378 if (src_state) {
3379 src_mask |= (1 << SRC_TO_INTF(stream_info->stream_src));
3380 if (stream_info->lpm_mode) {
3381 while (stream_info->state != ACTIVE)
3382 __msm_isp_axi_stream_update(
3383 stream_info, &timestamp);
3384 }
3385 } else {
3386 for (k = 0; k < stream_info->num_isp; k++) {
3387 vfe_dev = stream_info->vfe_dev[k];
3388
3389 if (vfe_dev->dump_reg)
3390 msm_camera_io_dump(vfe_dev->vfe_base,
3391 0x1000, 1);
3392 }
3393
3394 /* Configure AXI start bits to start immediately */
3395 while (stream_info->state != ACTIVE)
3396 __msm_isp_axi_stream_update(
3397 stream_info, &timestamp);
3398
3399 for (k = 0; k < stream_info->num_isp; k++) {
3400 vfe_dev = stream_info->vfe_dev[k];
3401 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
3402 vfe_dev,
3403 SRC_TO_INTF(stream_info->stream_src));
3404 }
3405 }
3406 spin_unlock_irqrestore(&stream_info->lock, flags);
3407 streams[num_streams++] = stream_info;
3408 }
Meera Gande044d53f2018-04-13 16:19:34 +05303409 mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303410
3411 for (i = 0; i < MAX_VFE; i++) {
3412 vfe_dev = update_vfes[i];
3413 if (!vfe_dev)
3414 continue;
3415 vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
3416 vfe_dev->vfe_base, wm_reload_mask[i]);
3417
3418 msm_isp_input_enable(vfe_dev,
3419 stream_cfg_cmd->sync_frame_id_src);
3420 }
3421
3422 rc = msm_isp_axi_wait_for_streams(streams, num_streams, 1);
3423 if (rc < 0) {
3424 pr_err("%s: wait for config done failed\n", __func__);
3425 goto error;
3426 }
3427
3428 return 0;
3429error:
3430 __msm_isp_stop_axi_streams(vfe_dev_ioctl, streams, num_streams,
3431 STOP_STREAM);
3432
3433 return rc;
3434}
3435
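/*
 * msm_isp_stop_axi_stream() - Stop the streams listed in the stream cfg cmd.
 *
 * Handles are validated, streams that are already inactive are skipped, and
 * the remaining streams are handed to __msm_isp_stop_axi_streams() in one
 * batch with the requested stop command.
 */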
3436static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev_ioctl,
3437 struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
3438{
3439 int i, rc = 0;
3440 struct msm_vfe_axi_stream *stream_info = NULL;
3441 struct msm_vfe_axi_stream *streams[MAX_NUM_STREAM];
3442 int num_streams = 0;
3443 unsigned long flags;
3444
3445 if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
3446 stream_cfg_cmd->num_streams == 0)
3447 return -EINVAL;
3448
3449 for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
3450 if (stream_cfg_cmd->stream_handle[i] == 0)
3451 continue;
3452 stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
3453 HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
3454 if (!stream_info) {
3455 pr_err("%s: stream_info is NULL\n", __func__);
3456 return -EINVAL;
3457 }
3458 spin_lock_irqsave(&stream_info->lock, flags);
3459 rc = __msm_isp_check_stream_state(stream_info, 0);
3460 spin_unlock_irqrestore(&stream_info->lock, flags);
3461 if (rc) {
3462 /*
3463 * continue stopping other streams as error here means
3464 * stream is already not active
3465 */
3466 rc = 0;
3467 continue;
3468 }
3469 streams[num_streams++] = stream_info;
3470 }
3471 __msm_isp_stop_axi_streams(vfe_dev_ioctl, streams, num_streams,
3472 stream_cfg_cmd->cmd);
3473
3474 return rc;
3475}
3476
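/*
 * msm_isp_cfg_axi_stream() - Entry point to start or stop a set of streams.
 *
 * Every handle is validated against the per-vfe bookkeeping and duplicate
 * handles are zeroed so a stream is only started or stopped once. START
 * enables the CGC override before streaming; STOP disables it afterwards and
 * clears the HVX command once no stream is active on the (split) vfe.
 */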
3477int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
3478{
3479 int rc = 0;
3480 struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
3481 uint32_t stream_idx[MAX_NUM_STREAM];
3482 int i;
3483 int vfe_idx;
3484 struct msm_vfe_axi_stream *stream_info;
3485
3486 memset(stream_idx, 0, sizeof(stream_idx));
3487
3488 for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
3489 if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
3490 VFE_AXI_SRC_MAX)
3491 return -EINVAL;
3492 stream_info = msm_isp_get_stream_common_data(vfe_dev,
3493 HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
3494 if (!stream_info) {
3495 pr_err("%s: stream_info is NULL\n", __func__);
3496 return -EINVAL;
3497 }
3498 vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev,
3499 stream_info);
3500 if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
3501 stream_cfg_cmd->stream_handle[i]) {
3502 pr_err("%s: Invalid stream handle %x vfe_idx %d expected %x\n",
3503 __func__, stream_cfg_cmd->stream_handle[i],
3504 vfe_idx,
3505 (vfe_idx != -ENOTTY) ?
3506 stream_info->stream_handle[vfe_idx] : 0);
3507 return -EINVAL;
3508 }
3509 /* check for duplicate stream handle */
3510 if (stream_idx[stream_info->stream_src] ==
3511 stream_cfg_cmd->stream_handle[i])
3512 stream_cfg_cmd->stream_handle[i] = 0;
3513 else
3514 stream_idx[stream_info->stream_src] =
3515 stream_cfg_cmd->stream_handle[i];
3516 }
3517 if (stream_cfg_cmd->cmd == START_STREAM) {
3518 msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
3519
3520 rc = msm_isp_start_axi_stream(
3521 vfe_dev, stream_cfg_cmd);
3522 } else {
3523 rc = msm_isp_stop_axi_stream(
3524 vfe_dev, stream_cfg_cmd);
3525
3526 msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
3527
3528 /*
3529 * Use different ret value to not overwrite the error from
3530 * msm_isp_stop_axi_stream
3531 */
3532 if (vfe_dev->axi_data.num_active_stream == 0)
3533 vfe_dev->hvx_cmd = HVX_DISABLE;
3534 if (vfe_dev->is_split) {
3535 struct vfe_device *vfe_temp =
3536 vfe_dev->common_data->
3537 dual_vfe_res->vfe_dev[ISP_VFE0];
3538 if (vfe_temp->axi_data.num_active_stream == 0)
3539 vfe_temp->hvx_cmd = HVX_DISABLE;
3540 }
3541 }
3542
3543 if (rc < 0)
3544 pr_err("%s: start/stop stream failed, cmd %d\n", __func__,
3545 stream_cfg_cmd->cmd);
3546 return rc;
3547}
3548
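/*
 * msm_isp_return_empty_buffer() - For a controllable-output stream, pull a
 * buffer from the requested queue and immediately return it as dropped
 * (MSM_ISP_BUFFER_STATE_DROP_REG), then notify userspace with an
 * ISP_ERROR_RETURN_EMPTY_BUFFER event so the request is not silently lost.
 */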
3549static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
3550 struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
3551 uint32_t frame_id, uint32_t buf_index,
3552 enum msm_vfe_input_src frame_src)
3553{
3554 int rc = -1;
3555 struct msm_isp_buffer *buf = NULL;
3556 uint32_t bufq_handle = 0;
3557 uint32_t stream_idx;
3558 struct msm_isp_event_data error_event;
3559 struct msm_isp_timestamp timestamp;
3560
3561 if (!vfe_dev || !stream_info) {
3562 pr_err("%s %d failed: vfe_dev %pK stream_info %pK\n", __func__,
3563 __LINE__, vfe_dev, stream_info);
3564 return -EINVAL;
3565 }
3566
3567 stream_idx = stream_info->stream_src;
3568 if (!stream_info->controllable_output)
3569 return -EINVAL;
3570
3571 if (frame_src >= VFE_SRC_MAX) {
3572 pr_err("%s: Invalid frame_src %d\n", __func__, frame_src);
3573 return -EINVAL;
3574 }
3575
3576 if (stream_idx >= VFE_AXI_SRC_MAX) {
3577 pr_err("%s: Invalid stream_idx %u\n", __func__, stream_idx);
3578 return rc;
3579 }
3580
3581 if (user_stream_id == stream_info->stream_id)
3582 bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT];
3583 else
3584 bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_SHARED];
3585
3587 rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
3588 vfe_dev->pdev->id, bufq_handle, buf_index, &buf);
3589 if (rc == -EFAULT) {
3590 msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
3591 return rc;
3592 }
3593
3594 if (rc < 0 || buf == NULL) {
3595 pr_err("Skip framedrop report due to no buffer\n");
3596 return rc;
3597 }
3598
3599 msm_isp_get_timestamp(&timestamp, vfe_dev);
3600 buf->buf_debug.put_state[buf->buf_debug.put_state_last] =
3601 MSM_ISP_BUFFER_STATE_DROP_REG;
3602 buf->buf_debug.put_state_last ^= 1;
3603 rc = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
3604 buf->bufq_handle, buf->buf_idx,
3605 &timestamp.buf_time, frame_id,
3606 stream_info->runtime_output_format);
3607 if (rc == -EFAULT) {
3608 msm_isp_halt_send_error(vfe_dev,
3609 ISP_EVENT_BUF_FATAL_ERROR);
3610 return rc;
3611 }
3612
3613 memset(&error_event, 0, sizeof(error_event));
3614 error_event.frame_id = frame_id;
3615 error_event.u.error_info.err_type = ISP_ERROR_RETURN_EMPTY_BUFFER;
3616 error_event.u.error_info.session_id = stream_info->session_id;
3617 error_event.u.error_info.stream_id_mask =
3618 1 << (bufq_handle & 0xFF);
3619 msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR, &error_event);
3620
3621 return 0;
3622}
3623
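/*
 * msm_isp_request_frame() - Queue one userspace frame request on a
 * controllable-output stream.
 *
 * The request is validated against the current frame/SOF id, queued on
 * request_q, and the ping/pong address for the free hardware slot is
 * programmed (at most MAX_BUFFERS_IN_HW requests can be in flight).
 * Requests that arrive at an unsafe time are deferred via drop_reconfig
 * rather than dropped outright.
 */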
3624static int msm_isp_request_frame(struct vfe_device *vfe_dev,
3625 struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
3626 uint32_t frame_id, uint32_t buf_index)
3627{
3628 struct msm_vfe_axi_stream_request_cmd stream_cfg_cmd;
3629 struct msm_vfe_frame_request_queue *queue_req;
3630 uint32_t pingpong_status;
3631 unsigned long flags;
3632 int rc = 0;
3633 enum msm_vfe_input_src frame_src = 0;
3634 int k;
3635 uint32_t wm_mask = 0;
3636 int vfe_idx;
3637 uint32_t pingpong_bit = 0;
3638
3639 if (!vfe_dev || !stream_info) {
3640 pr_err("%s %d failed: vfe_dev %pK stream_info %pK\n", __func__,
3641 __LINE__, vfe_dev, stream_info);
3642 return -EINVAL;
3643 }
3644
3645 /* return early for dual vfe0 */
3646 if (stream_info->num_isp > 1 && vfe_dev->pdev->id == ISP_VFE0)
3647 return 0;
3648
3649 if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
3650 pr_err("%s:%d invalid stream src %d\n", __func__, __LINE__,
3651 stream_info->stream_src);
3652 return -EINVAL;
3653 }
3654
3655 frame_src = SRC_TO_INTF(stream_info->stream_src);
3656 pingpong_status = vfe_dev->hw_info->
3657 vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
Meera Gandeecd742032018-10-11 15:32:51 +05303658
3659 /* MCT is still processing the previous reconfig, so drop additional requests */
Srikanth Uyyala7b9010a2019-05-03 10:17:03 +05303660 if (vfe_dev->isp_page->drop_reconfig &&
3661 frame_src == VFE_PIX_0) {
Meera Gandeecd742032018-10-11 15:32:51 +05303662 pr_err("%s: drop_reconfig %d still pending in MCT, dropping request for frame %d\n",
3663 __func__, vfe_dev->isp_page->drop_reconfig, frame_id);
3664 goto error;
3665 }
3666
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303667 /*
3668 * If the PIX stream is active, the RDI path uses the SOF frame id
3669 * of PIX. For standalone RDI streaming, SOFs are taken from the
3670 * individual intf.
3671 */
3672 /*
3673 * No EOF check is needed when frame_id == 1.
3674 */
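	/*
	 * Illustration (assuming sof_counter_step == 1): a request tagged for
	 * frame N proceeds only while the current frame id is N - 1. If N
	 * matches the current frame id or the irq SOF id, the request is
	 * deferred via drop_reconfig; any other frame id (or an inactive
	 * intf) is treated as stale and an empty buffer is returned.
	 */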
3675 if (vfe_dev->axi_data.src_info[frame_src].active &&
3676 frame_src == VFE_PIX_0 &&
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05303677 vfe_dev->axi_data.src_info[frame_src].accept_frame == false &&
3678 (stream_info->undelivered_request_cnt <=
3679 MAX_BUFFERS_IN_HW)
3680 ) {
Srikanth Uyyalaf4c6a3a2019-07-12 16:11:46 +05303681 pr_debug("%s:%d invalid time to request frame %d try drop_reconfig\n",
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303682 __func__, __LINE__, frame_id);
Meera Gandeecd742032018-10-11 15:32:51 +05303683 vfe_dev->isp_page->drop_reconfig = 1;
Srikanth Uyyalaf4c6a3a2019-07-12 16:11:46 +05303684 return 0;
Meera Gandeecd742032018-10-11 15:32:51 +05303685 } else if ((vfe_dev->axi_data.src_info[frame_src].active) &&
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05303686 ((frame_id ==
3687 vfe_dev->axi_data.src_info[frame_src].frame_id) ||
3688 (frame_id == vfe_dev->irq_sof_id)) &&
Meera Gandeecd742032018-10-11 15:32:51 +05303689 (stream_info->undelivered_request_cnt <=
3690 MAX_BUFFERS_IN_HW)) {
3691 vfe_dev->isp_page->drop_reconfig = 1;
Srikanth Uyyalaf4c6a3a2019-07-12 16:11:46 +05303692 pr_debug("%s: vfe_%d request_frame %d cur frame id %d pix %d try drop_reconfig\n",
Meera Gandeecd742032018-10-11 15:32:51 +05303693 __func__, vfe_dev->pdev->id, frame_id,
3694 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
3695 vfe_dev->axi_data.src_info[VFE_PIX_0].active);
Srikanth Uyyalaf4c6a3a2019-07-12 16:11:46 +05303696 return 0;
Meera Gandeecd742032018-10-11 15:32:51 +05303697 } else if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303698 vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
3699 axi_data.src_info[frame_src].sof_counter_step)) ||
3700 ((!vfe_dev->axi_data.src_info[frame_src].active))) {
3701 pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n",
3702 __func__, __LINE__, frame_id,
3703 vfe_dev->axi_data.src_info[frame_src].frame_id,
3704 vfe_dev->axi_data.src_info[frame_src].active);
3705 goto error;
3706 }
3707 if (stream_info->undelivered_request_cnt >= MAX_BUFFERS_IN_HW) {
3708 pr_debug("%s:%d invalid undelivered_request_cnt %d frame id %d\n",
3709 __func__, __LINE__,
3710 stream_info->undelivered_request_cnt,
3711 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
3712 goto error;
3713 }
3714 if ((frame_src == VFE_PIX_0) && !stream_info->undelivered_request_cnt &&
3715 MSM_VFE_STREAM_STOP_PERIOD !=
3716 stream_info->activated_framedrop_period) {
Srikanth Uyyalaf4c6a3a2019-07-12 16:11:46 +05303717 /* The wm is reloaded when undelivered_request_cnt is zero.
3718 * Per the hw behavior, the wm should be disabled or skip writing
3719 * before the reload happens; otherwise the wm could start writing
3720 * from the middle of the frame and corrupt the image. Instead of
3721 * dropping the frame in this error scenario, use the drop_reconfig
3722 * flag to process the request at the next SOF.
3723 */
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303724 pr_debug("%s:%d vfe %d frame_id %d prev_pattern %x stream_id %x\n",
3725 __func__, __LINE__, vfe_dev->pdev->id, frame_id,
3726 stream_info->activated_framedrop_period,
3727 stream_info->stream_id);
Srikanth Uyyalaf4c6a3a2019-07-12 16:11:46 +05303728 vfe_dev->isp_page->drop_reconfig = 1;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303729 return 0;
3730 }
3731
3732 spin_lock_irqsave(&stream_info->lock, flags);
3733 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
3734 /* When the wm is reloaded, the pingpong status register is stale;
3735 * it is updated only after the AXI_DONE interrupt is processed.
3736 * So avoid reading the pingpong status register until buf_done
3737 * happens for the ping buffer.
3738 */
3739 if ((stream_info->undelivered_request_cnt == 1) &&
3740 (stream_info->sw_ping_pong_bit != -1)) {
3741 pingpong_status =
3742 vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
3743 vfe_dev);
3744 pingpong_bit = ((pingpong_status >>
3745 stream_info->wm[vfe_idx][0]) & 0x1);
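	/*
	 * Illustrative example: with wm[vfe_idx][0] == 5 and
	 * pingpong_status == 0x20, pingpong_bit = (0x20 >> 5) & 0x1 = 1.
	 * If sw_ping_pong_bit equals the inverse of this value, the request
	 * is answered with an empty buffer below.
	 */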
3746 if (stream_info->sw_ping_pong_bit == !pingpong_bit) {
3747 ISP_DBG("%s:Return Empty Buffer stream id 0x%X\n",
3748 __func__, stream_info->stream_id);
3749 rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
3750 user_stream_id, frame_id, buf_index,
3751 frame_src);
3752 spin_unlock_irqrestore(&stream_info->lock,
3753 flags);
3754 return 0;
3755 }
3756 }
3757
3758 queue_req = &stream_info->request_queue_cmd[stream_info->request_q_idx];
3759 if (queue_req->cmd_used) {
3760 spin_unlock_irqrestore(&stream_info->lock, flags);
3761 pr_err_ratelimited("%s: Request queue overflow.\n", __func__);
3762 return -EINVAL;
3763 }
3764
3765 if (user_stream_id == stream_info->stream_id)
3766 queue_req->buff_queue_id = VFE_BUF_QUEUE_DEFAULT;
3767 else
3768 queue_req->buff_queue_id = VFE_BUF_QUEUE_SHARED;
3769
3770 if (!stream_info->bufq_handle[queue_req->buff_queue_id]) {
3771 spin_unlock_irqrestore(&stream_info->lock, flags);
3772 pr_err("%s:%d request frame failed on hw stream 0x%x, request stream %d due to no bufq idx: %d\n",
3773 __func__, __LINE__,
3774 stream_info->stream_handle[0],
3775 user_stream_id, queue_req->buff_queue_id);
3776 return 0;
3777 }
3778 queue_req->buf_index = buf_index;
3779 queue_req->cmd_used = 1;
3780
3781 stream_info->request_q_idx =
3782 (stream_info->request_q_idx + 1) % MSM_VFE_REQUESTQ_SIZE;
3783 list_add_tail(&queue_req->list, &stream_info->request_q);
3784 stream_info->request_q_cnt++;
3785
3786 stream_info->undelivered_request_cnt++;
3787 stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle[vfe_idx];
3788 stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
3789 stream_cfg_cmd.init_frame_drop = 0;
3790 stream_cfg_cmd.burst_count = stream_info->request_q_cnt;
3791
3792 if (stream_info->undelivered_request_cnt == 1) {
3793 rc = msm_isp_cfg_ping_pong_address(stream_info,
3794 VFE_PING_FLAG, NULL);
3795 if (rc) {
3796 spin_unlock_irqrestore(&stream_info->lock, flags);
3797 stream_info->undelivered_request_cnt--;
Ramesh Vc97c71e2019-04-17 10:50:13 +05303798 queue_req = list_first_entry_or_null(
3799 &stream_info->request_q,
3800 struct msm_vfe_frame_request_queue, list);
3801 if (queue_req) {
3802 queue_req->cmd_used = 0;
3803 list_del(&queue_req->list);
3804 stream_info->request_q_cnt--;
3805 }
3806 pr_err_ratelimited("%s:%d fail to cfg HAL buffer stream %x\n",
3807 __func__, __LINE__, stream_info->stream_id);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303808 return rc;
3809 }
3810
3811 for (k = 0; k < stream_info->num_isp; k++) {
3812 wm_mask = 0;
3813 msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
3814 stream_info, &wm_mask);
3815 stream_info->vfe_dev[k]->
3816 hw_info->vfe_ops.axi_ops.reload_wm(
3817 stream_info->vfe_dev[k],
3818 stream_info->vfe_dev[k]->vfe_base, wm_mask);
3819
3820 }
3821 /* sw_ping_pong_bit is updated only on AXI_DONE,
3822 * so reset it to -1 here.
3823 */
3824 stream_info->sw_ping_pong_bit = -1;
3825 } else if (stream_info->undelivered_request_cnt == 2) {
3826 if (stream_info->sw_ping_pong_bit == -1) {
3827 /* The wm was reloaded and the ping buffer is already
3828 * configured, but AXI_DONE for ping is still pending.
3829 * So configure the pong buffer now.
3830 */
3832 rc = msm_isp_cfg_ping_pong_address(stream_info,
3833 VFE_PONG_FLAG, NULL);
3834 } else {
3835 rc = msm_isp_cfg_ping_pong_address(
3836 stream_info, pingpong_status, NULL);
3837 }
3838 if (rc) {
3839 stream_info->undelivered_request_cnt--;
3840 spin_unlock_irqrestore(&stream_info->lock,
3841 flags);
Ramesh Vc97c71e2019-04-17 10:50:13 +05303842 queue_req = list_first_entry_or_null(
3843 &stream_info->request_q,
3844 struct msm_vfe_frame_request_queue, list);
3845 if (queue_req) {
3846 queue_req->cmd_used = 0;
3847 list_del(&queue_req->list);
3848 stream_info->request_q_cnt--;
3849 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303850 pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
3851 __func__, __LINE__);
3852 return rc;
3853 }
3854 } else {
3855 spin_unlock_irqrestore(&stream_info->lock, flags);
3856 stream_info->undelivered_request_cnt--;
3857 pr_err_ratelimited("%s: Invalid undelivered frame count %d\n",
3858 __func__, stream_info->undelivered_request_cnt);
3859 return -EINVAL;
3860 }
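	/*
	 * Summary of the two-slot bookkeeping above: only one or two requests
	 * can be outstanding at this point. The first programs PING and
	 * reloads the wm (sw_ping_pong_bit parked at -1 until AXI_DONE); the
	 * second programs PONG while the reload is still pending, or the slot
	 * indicated by the pingpong status otherwise.
	 */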
3861
3862 rc = msm_isp_calculate_framedrop(vfe_dev, &stream_cfg_cmd);
3863 if (rc == 0)
3864 msm_isp_reset_framedrop(vfe_dev, stream_info);
3865
3866 /* Avoid multiple request frames for a single SOF */
3867 vfe_dev->axi_data.src_info[frame_src].accept_frame = false;
3868
3869 spin_unlock_irqrestore(&stream_info->lock, flags);
3870
3871 return rc;
3872error:
3873 rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
3874 user_stream_id, frame_id, buf_index, frame_src);
3875 if (rc < 0)
3876 pr_err("%s:%d failed: return_empty_buffer src %d\n",
3877 __func__, __LINE__, frame_src);
3878 return 0;
3879
3880}
3881
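/*
 * msm_isp_add_buf_queue() - Attach a user buffer queue to the stream.
 *
 * The stream's own id maps to VFE_BUF_QUEUE_DEFAULT, any other id to
 * VFE_BUF_QUEUE_SHARED; attaching a second, different queue to an already
 * populated slot is rejected.
 */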
3882static int msm_isp_add_buf_queue(struct vfe_device *vfe_dev,
3883 struct msm_vfe_axi_stream *stream_info, uint32_t stream_id)
3884{
3885 int rc = 0;
3886 uint32_t bufq_id = 0;
3887 unsigned long flags;
3888
3889 if (stream_id == stream_info->stream_id)
3890 bufq_id = VFE_BUF_QUEUE_DEFAULT;
3891 else
3892 bufq_id = VFE_BUF_QUEUE_SHARED;
3893
3894 spin_lock_irqsave(&stream_info->lock, flags);
3895
3896 if (stream_info->bufq_handle[bufq_id] == 0) {
3897 stream_info->bufq_handle[bufq_id] =
3898 vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
3899 stream_info->session_id, stream_id);
3900 if (stream_info->bufq_handle[bufq_id] == 0) {
3901 spin_unlock_irqrestore(&stream_info->lock, flags);
3902 pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
3903 __func__, stream_id);
3904 return -EINVAL;
3905 }
3906 } else {
3907 uint32_t bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
3908 vfe_dev->buf_mgr,
3909 stream_info->session_id,
3910 stream_id);
3911 if (bufq_handle != stream_info->bufq_handle[bufq_id]) {
3912 spin_unlock_irqrestore(&stream_info->lock, flags);
3913 pr_err("%s: Stream %x already has buffer q %x cannot add handle %x\n",
3914 __func__, stream_id,
3915 stream_info->bufq_handle[bufq_id], bufq_handle);
3916 return -EINVAL;
3917 }
3918 }
3919
3920 spin_unlock_irqrestore(&stream_info->lock, flags);
3921
3922 ISP_DBG("%d: Add bufq handle:0x%x, idx:%d, for stream %d on VFE %d\n",
3923 __LINE__, stream_info->bufq_handle[bufq_id],
3924 bufq_id, stream_info->stream_handle[0],
3925 vfe_dev->pdev->id);
3926
3927 return rc;
3928}
3929
3930static void msm_isp_remove_buf_queue(struct vfe_device *vfe_dev,
3931 struct msm_vfe_axi_stream *stream_info, uint32_t stream_id)
3932{
3933 uint32_t bufq_id = 0;
3934 unsigned long flags;
3935
3936 if (stream_id == stream_info->stream_id)
3937 bufq_id = VFE_BUF_QUEUE_DEFAULT;
3938 else
3939 bufq_id = VFE_BUF_QUEUE_SHARED;
3940
3941 spin_lock_irqsave(&stream_info->lock, flags);
3942
3943 if (stream_info->bufq_handle[bufq_id]) {
3944 stream_info->bufq_handle[bufq_id] = 0;
3945 if (stream_info->state == ACTIVE) {
3946 init_completion(&stream_info->active_comp);
3947 stream_info->state = UPDATING;
3948 }
3949 }
3950 spin_unlock_irqrestore(&stream_info->lock, flags);
3951 if (stream_info->state == UPDATING)
3952 msm_isp_axi_wait_for_stream_cfg_done(stream_info, 1);
3953
3954}
3955
3956/**
3957 * msm_isp_stream_axi_cfg_update() - Apply axi config update to a stream
3958 * @vfe_dev: The vfe device on which the update is to be applied
3959 * @stream_info: Stream for which update is to be applied
3960 * @update_info: Parameters of the update
3961 *
3962 * Return: 0 on success, else error code
3963 *
3964 * For a dual vfe stream, the update is applied only once the update
3965 * from both vfes has been received.
3966 */
3967static int msm_isp_stream_axi_cfg_update(struct vfe_device *vfe_dev,
3968 struct msm_vfe_axi_stream *stream_info,
3969 struct msm_vfe_axi_stream_cfg_update_info *update_info)
3970{
3971 int j;
3972 int k;
3973 unsigned long flags;
3974 int vfe_idx;
3975
Pratap Nirujogi6e759912018-01-17 17:51:17 +05303976 spin_lock_irqsave(&stream_info->lock, flags);
3977 if (stream_info->state != ACTIVE) {
3978 spin_unlock_irqrestore(&stream_info->lock, flags);
3979 pr_err("Invalid stream state for axi update %d\n",
3980 stream_info->state);
3981 return -EINVAL;
3982 }
3983 if (stream_info->update_vfe_mask) {
3984 if (stream_info->update_vfe_mask & (1 << vfe_dev->pdev->id)) {
3985 spin_unlock_irqrestore(&stream_info->lock, flags);
3986 pr_err("%s: Stream %pK/%x Update already in progress for vfe %d\n",
3987 __func__, stream_info, stream_info->stream_src,
3988 vfe_dev->pdev->id);
3989 return -EINVAL;
3990 }
3991 }
3992 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
3993
3994 for (j = 0; j < stream_info->num_planes; j++)
3995 stream_info->plane_cfg[vfe_idx][j] = update_info->plane_cfg[j];
3996
3997 stream_info->update_vfe_mask |= (1 << vfe_dev->pdev->id);
3998 /* wait for update from all vfe's under stream before applying */
3999 if (stream_info->update_vfe_mask != stream_info->vfe_mask) {
4000 spin_unlock_irqrestore(&stream_info->lock, flags);
4001 return 0;
4002 }
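	/*
	 * Example of the handshake above for a split (dual vfe) stream:
	 * vfe_mask is 0x3 (VFE0 | VFE1). The first caller only records its
	 * plane_cfg and sets its bit in update_vfe_mask; the second caller
	 * completes the mask and falls through to actually pause/resume the
	 * write masters below.
	 */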
4003
4004 atomic_set(&vfe_dev->axi_data.axi_cfg_update[
4005 SRC_TO_INTF(stream_info->stream_src)], 1);
4006 stream_info->output_format = update_info->output_format;
4007 init_completion(&stream_info->active_comp);
4008 if (((vfe_dev->hw_info->runtime_axi_update == 0) ||
4009 (vfe_dev->dual_vfe_enable == 1))) {
4010 stream_info->state = PAUSE_PENDING;
4011 msm_isp_axi_stream_enable_cfg(stream_info);
4012 stream_info->state = PAUSING;
4013 } else {
4014 for (j = 0; j < stream_info->num_planes; j++) {
4015 for (k = 0; k < stream_info->num_isp; k++) {
4016 vfe_dev = stream_info->vfe_dev[k];
4017 vfe_dev->hw_info->vfe_ops.axi_ops.
4018 cfg_wm_reg(vfe_dev, stream_info, j);
4019 }
4020 }
4021 stream_info->state = RESUMING;
4022 }
4023 spin_unlock_irqrestore(&stream_info->lock, flags);
4024 return 0;
4025}
4026
4027int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
4028{
Meera Gande779d64a2018-06-14 15:12:17 +05304029 int rc = 0, i, j, k;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304030 struct msm_vfe_axi_stream *stream_info;
4031 struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
4032 struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
4033 struct msm_isp_sw_framskip *sw_skip_info = NULL;
4034 unsigned long flags;
4035 struct msm_isp_timestamp timestamp;
4036 uint32_t frame_id;
4037 int vfe_idx;
4038
4039 /* num_streams is uint32 and update_info[] is bounded by MAX_NUM_STREAM */
4040 if (update_cmd->num_streams > MAX_NUM_STREAM)
4041 return -EINVAL;
4042
4043 for (i = 0; i < update_cmd->num_streams; i++) {
4044 update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
4045 &update_cmd->update_info[i];
4046 /* check array reference bounds */
4047 if (HANDLE_TO_IDX(update_info->stream_handle) >=
4048 VFE_AXI_SRC_MAX) {
4049 return -EINVAL;
4050 }
4051 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4052 HANDLE_TO_IDX(update_info->stream_handle));
4053 if (!stream_info) {
4054 pr_err("%s: stream_info is null\n", __func__);
4055 return -EINVAL;
4056 }
4057 if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX)
4058 continue;
4059 if (stream_info->state != ACTIVE &&
4060 stream_info->state != INACTIVE &&
4061 update_cmd->update_type !=
4062 UPDATE_STREAM_REQUEST_FRAMES &&
4063 update_cmd->update_type !=
4064 UPDATE_STREAM_REMOVE_BUFQ &&
4065 update_cmd->update_type !=
Meera Gande3eff6a62018-07-03 17:12:53 +05304066 UPDATE_STREAM_SW_FRAME_DROP &&
4067 update_cmd->update_type !=
4068 UPDATE_STREAM_REQUEST_FRAMES_VER2) {
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304069 pr_err("%s: Invalid stream state %d for stream 0x%x, update type %d\n",
4070 __func__, stream_info->state,
4071 stream_info->stream_id, update_cmd->update_type);
4072 return -EINVAL;
4073 }
4074 if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
4075 stream_info->state != ACTIVE) {
4076 pr_err("%s: AXI stream config updating\n", __func__);
4077 return -EBUSY;
4078 }
4079 }
4080
4081 switch (update_cmd->update_type) {
4082 case ENABLE_STREAM_BUF_DIVERT:
4083 for (i = 0; i < update_cmd->num_streams; i++) {
4084 update_info =
4085 (struct msm_vfe_axi_stream_cfg_update_info *)
4086 &update_cmd->update_info[i];
4087 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4088 HANDLE_TO_IDX(update_info->stream_handle));
4089 if (!stream_info) {
4090 pr_err("%s: stream_info is null\n", __func__);
4091 return -EINVAL;
4092 }
4093 stream_info->buf_divert = 1;
4094 }
4095 break;
4096 case DISABLE_STREAM_BUF_DIVERT:
4097 for (i = 0; i < update_cmd->num_streams; i++) {
4098 update_info =
4099 (struct msm_vfe_axi_stream_cfg_update_info *)
4100 &update_cmd->update_info[i];
4101 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4102 HANDLE_TO_IDX(update_info->stream_handle));
4103 if (!stream_info) {
4104 pr_err("%s: stream_info is null\n", __func__);
4105 return -EINVAL;
4106 }
4107 stream_info->buf_divert = 0;
4108 msm_isp_get_timestamp(&timestamp, vfe_dev);
4109 frame_id = vfe_dev->axi_data.src_info[
4110 SRC_TO_INTF(stream_info->stream_src)].frame_id;
4111 /* set ping pong address to scratch before flush */
4112 spin_lock_irqsave(&stream_info->lock, flags);
4113 msm_isp_cfg_stream_scratch(stream_info,
4114 VFE_PING_FLAG);
4115 msm_isp_cfg_stream_scratch(stream_info,
4116 VFE_PONG_FLAG);
4117 spin_unlock_irqrestore(&stream_info->lock, flags);
4118 rc = vfe_dev->buf_mgr->ops->flush_buf(
4119 vfe_dev->buf_mgr,
4120 stream_info->bufq_handle
4121 [VFE_BUF_QUEUE_DEFAULT],
4122 MSM_ISP_BUFFER_FLUSH_DIVERTED,
4123 &timestamp.buf_time, frame_id);
4124 if (rc == -EFAULT) {
4125 msm_isp_halt_send_error(vfe_dev,
4126 ISP_EVENT_BUF_FATAL_ERROR);
4127 return rc;
4128 }
4129 }
4130 break;
4131 case UPDATE_STREAM_FRAMEDROP_PATTERN: {
4132 for (i = 0; i < update_cmd->num_streams; i++) {
4133 uint32_t framedrop_period;
4134 update_info =
4135 (struct msm_vfe_axi_stream_cfg_update_info *)
4136 &update_cmd->update_info[i];
4137 framedrop_period = msm_isp_get_framedrop_period(
4138 update_info->skip_pattern);
4139 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4140 HANDLE_TO_IDX(update_info->stream_handle));
4141 if (!stream_info) {
4142 pr_err("%s: stream_info is null\n", __func__);
4143 return -EINVAL;
4144 }
4145 spin_lock_irqsave(&stream_info->lock, flags);
4146 /* no change then break early */
4147 if (stream_info->current_framedrop_period ==
4148 framedrop_period) {
4149 spin_unlock_irqrestore(&stream_info->lock,
4150 flags);
4151 break;
4152 }
4153 if (stream_info->controllable_output) {
4154 pr_err("Controllable output streams do not support a custom frame skip pattern\n");
4155 spin_unlock_irqrestore(&stream_info->lock,
4156 flags);
4157 return -EINVAL;
4158 }
4159 if (update_info->skip_pattern == SKIP_ALL)
4160 stream_info->current_framedrop_period =
4161 MSM_VFE_STREAM_STOP_PERIOD;
4162 else
4163 stream_info->current_framedrop_period =
4164 framedrop_period;
4165 if (stream_info->stream_type != BURST_STREAM)
4166 msm_isp_cfg_framedrop_reg(stream_info);
4167 spin_unlock_irqrestore(&stream_info->lock, flags);
4168 }
4169 break;
4170 }
4171 case UPDATE_STREAM_SW_FRAME_DROP: {
4172 for (i = 0; i < update_cmd->num_streams; i++) {
4173 update_info =
4174 (struct msm_vfe_axi_stream_cfg_update_info *)
4175 &update_cmd->update_info[i];
4176 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4177 HANDLE_TO_IDX(update_info->stream_handle));
4178 if (!stream_info) {
4179 pr_err("%s: stream_info is null\n", __func__);
4180 return -EINVAL;
4181 }
4182 sw_skip_info = &update_info->sw_skip_info;
4183 if (sw_skip_info &&
4184 sw_skip_info->stream_src_mask != 0) {
4185 /* SW image buffer drop */
4186 pr_debug("%x sw skip type %x mode %d min %d max %d\n",
4187 stream_info->stream_id,
4188 sw_skip_info->stats_type_mask,
4189 sw_skip_info->skip_mode,
4190 sw_skip_info->min_frame_id,
4191 sw_skip_info->max_frame_id);
4192 spin_lock_irqsave(&stream_info->lock, flags);
4193 stream_info->sw_skip = *sw_skip_info;
4194 spin_unlock_irqrestore(&stream_info->lock,
4195 flags);
4196 }
4197 }
4198 break;
4199 }
4200 case UPDATE_STREAM_AXI_CONFIG: {
4201 for (i = 0; i < update_cmd->num_streams; i++) {
4202 update_info =
4203 (struct msm_vfe_axi_stream_cfg_update_info *)
4204 &update_cmd->update_info[i];
4205 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4206 HANDLE_TO_IDX(update_info->stream_handle));
4207 if (!stream_info) {
4208 pr_err("%s: stream_info is null\n", __func__);
4209 return -EINVAL;
4210 }
4211 rc = msm_isp_stream_axi_cfg_update(vfe_dev, stream_info,
4212 update_info);
4213 if (rc)
4214 return rc;
4215 }
4216 break;
4217 }
4218 case UPDATE_STREAM_REQUEST_FRAMES: {
4219 for (i = 0; i < update_cmd->num_streams; i++) {
4220 update_info =
4221 (struct msm_vfe_axi_stream_cfg_update_info *)
4222 &update_cmd->update_info[i];
4223 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4224 HANDLE_TO_IDX(update_info->stream_handle));
4225 if (!stream_info) {
4226 pr_err("%s: stream_info is null\n", __func__);
4227 return -EINVAL;
4228 }
Meera Gande488acf42018-03-23 16:13:18 +05304229 mutex_lock(&vfe_dev->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304230 rc = msm_isp_request_frame(vfe_dev, stream_info,
4231 update_info->user_stream_id,
4232 update_info->frame_id,
4233 MSM_ISP_INVALID_BUF_INDEX);
Meera Gande488acf42018-03-23 16:13:18 +05304234 mutex_unlock(&vfe_dev->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304235 if (rc)
4236 pr_err("%s failed to request frame!\n",
4237 __func__);
4238 }
4239 break;
4240 }
4241 case UPDATE_STREAM_ADD_BUFQ: {
4242 for (i = 0; i < update_cmd->num_streams; i++) {
4243 update_info =
4244 (struct msm_vfe_axi_stream_cfg_update_info *)
4245 &update_cmd->update_info[i];
4246 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4247 HANDLE_TO_IDX(update_info->stream_handle));
4248 if (!stream_info) {
4249 pr_err("%s: stream_info is null\n", __func__);
4250 return -EINVAL;
4251 }
4252 rc = msm_isp_add_buf_queue(vfe_dev, stream_info,
4253 update_info->user_stream_id);
4254 if (rc)
4255 pr_err("%s failed to add bufq!\n", __func__);
4256 }
4257 break;
4258 }
4259 case UPDATE_STREAM_REMOVE_BUFQ: {
4260 for (i = 0; i < update_cmd->num_streams; i++) {
4261 update_info =
4262 (struct msm_vfe_axi_stream_cfg_update_info *)
4263 &update_cmd->update_info[i];
4264 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4265 HANDLE_TO_IDX(update_info->stream_handle));
4266 if (!stream_info) {
4267 pr_err("%s: stream_info is null\n", __func__);
4268 return -EINVAL;
4269 }
4270 msm_isp_remove_buf_queue(vfe_dev, stream_info,
4271 update_info->user_stream_id);
4272 pr_debug("%s, Remove bufq for Stream 0x%x\n",
4273 __func__, stream_info->stream_id);
4274 }
4275 break;
4276 }
4277 case UPDATE_STREAM_REQUEST_FRAMES_VER2: {
4278 struct msm_vfe_axi_stream_cfg_update_info_req_frm *req_frm =
4279 &update_cmd->req_frm_ver2;
4280 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4281 HANDLE_TO_IDX(req_frm->stream_handle));
4282 if (!stream_info) {
4283 pr_err("%s: stream_info is null\n", __func__);
4284 return -EINVAL;
4285 }
Meera Gande488acf42018-03-23 16:13:18 +05304286 mutex_lock(&vfe_dev->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304287 rc = msm_isp_request_frame(vfe_dev, stream_info,
4288 req_frm->user_stream_id,
4289 req_frm->frame_id,
4290 req_frm->buf_index);
Meera Gande488acf42018-03-23 16:13:18 +05304291 mutex_unlock(&vfe_dev->buf_mgr->lock);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304292 if (rc)
4293 pr_err("%s failed to request frame!\n",
4294 __func__);
4295 break;
4296 }
4297 case UPDATE_STREAM_OFFLINE_AXI_CONFIG: {
4298 for (i = 0; i < update_cmd->num_streams; i++) {
4299 update_info =
4300 (struct msm_vfe_axi_stream_cfg_update_info *)
4301 &update_cmd->update_info[i];
4302 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4303 HANDLE_TO_IDX(update_info->stream_handle));
4304 if (!stream_info) {
4305 pr_err("%s: stream_info is null\n", __func__);
4306 return -EINVAL;
4307 }
4308 vfe_idx = msm_isp_get_vfe_idx_for_stream(
4309 vfe_dev, stream_info);
Meera Gande779d64a2018-06-14 15:12:17 +05304310 for (j = 0; j < stream_info->num_planes; j++) {
4311 stream_info->plane_cfg[vfe_idx][j] =
4312 update_info->plane_cfg[j];
4313 for (k = 0; k < stream_info->num_isp; k++) {
4314 vfe_dev = stream_info->vfe_dev[k];
4315 vfe_dev->hw_info->vfe_ops.axi_ops.
4316 cfg_wm_reg(vfe_dev,
4317 stream_info, j);
4318 }
4319 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304320 }
4321 break;
4322 }
4323 default:
4324 pr_err("%s: Invalid update type %d\n", __func__,
4325 update_cmd->update_type);
4326 return -EINVAL;
4327 }
4328
4329 return rc;
4330}
4331
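/*
 * msm_isp_process_axi_irq_stream() - Per-stream write-master done handling.
 *
 * Validates that all planes report the same ping/pong bit, composites the
 * irq for dual vfe, re-programs or scratches the just-freed slot, and then
 * either delivers the buffer, defers it to RegUpdateAck (controllable
 * output), or accounts a frame drop.
 */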
4332void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
4333 struct msm_vfe_axi_stream *stream_info,
4334 uint32_t pingpong_status,
4335 struct msm_isp_timestamp *ts)
4336{
4337 int rc = -1;
4338 uint32_t pingpong_bit = 0, i;
4339 struct msm_isp_buffer *done_buf = NULL;
4340 unsigned long flags;
4341 struct timeval *time_stamp;
4342 uint32_t frame_id, buf_index = -1;
4343 int vfe_idx;
Ramesh Vfb407b72019-05-10 16:42:10 +05304344 struct vfe_device *temp_dev;
4345 int other_vfe_id;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304346
4347 if (!ts) {
4348 pr_err("%s: Error! Invalid argument\n", __func__);
4349 return;
4350 }
4351
Srikanth Uyyala2d52bd12018-03-05 14:05:21 +05304352 if (vfe_dev->vt_enable) {
4353 msm_isp_get_avtimer_ts(ts);
4354 time_stamp = &ts->vt_time;
4355 } else {
4356 time_stamp = &ts->buf_time;
4357 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304358
4359 frame_id = vfe_dev->axi_data.
4360 src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
4361
4362 spin_lock_irqsave(&stream_info->lock, flags);
4363 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
4364 pingpong_bit = (~(pingpong_status >>
4365 stream_info->wm[vfe_idx][0]) & 0x1);
4366 for (i = 0; i < stream_info->num_planes; i++) {
4367 if (pingpong_bit !=
4368 (~(pingpong_status >>
4369 stream_info->wm[vfe_idx][i]) & 0x1)) {
4370 spin_unlock_irqrestore(&stream_info->lock, flags);
4371 msm_isp_dump_ping_pong_mismatch(vfe_dev);
4372 pr_err("%s: Write master ping pong mismatch. Status: 0x%x %x\n",
4373 __func__, pingpong_status,
4374 stream_info->stream_src);
4375 msm_isp_halt_send_error(vfe_dev,
4376 ISP_EVENT_PING_PONG_MISMATCH);
4377 return;
4378 }
4379 }
4380 if (stream_info->state == INACTIVE) {
4381 WARN_ON(stream_info->buf[pingpong_bit] != NULL);
4382 spin_unlock_irqrestore(&stream_info->lock, flags);
4383 return;
4384 }
4385
4386 /* composite the irq for dual vfe */
4387 rc = msm_isp_composite_irq(vfe_dev, stream_info,
4388 MSM_ISP_COMP_IRQ_PING_BUFDONE + pingpong_bit);
4389 if (rc) {
4390 spin_unlock_irqrestore(&stream_info->lock, flags);
4391 if (rc < 0)
4392 msm_isp_halt_send_error(vfe_dev,
4393 ISP_EVENT_PING_PONG_MISMATCH);
4394 return;
4395 }
4396
4397 done_buf = stream_info->buf[pingpong_bit];
4398
4399 if (vfe_dev->buf_mgr->frameId_mismatch_recovery == 1) {
4400 if (done_buf) {
4401 if (done_buf->is_drop_reconfig == 1)
4402 done_buf->is_drop_reconfig = 0;
4403 }
4404 pr_err_ratelimited("%s: Mismatch Recovery in progress, drop frame!\n",
4405 __func__);
4406 spin_unlock_irqrestore(&stream_info->lock, flags);
4407 return;
4408 }
4409
4410 if (done_buf)
4411 buf_index = done_buf->buf_idx;
4412
4413 ISP_DBG("%s: vfe %d: stream 0x%x, frame id %d, pingpong bit %d\n",
4414 __func__,
4415 vfe_dev->pdev->id,
4416 stream_info->stream_id,
4417 frame_id,
4418 pingpong_bit);
4419
4420 stream_info->frame_id++;
4421 stream_info->buf[pingpong_bit] = NULL;
4422
4423 if (stream_info->controllable_output &&
4424 (done_buf != NULL) &&
4425 (stream_info->sw_ping_pong_bit == -1) &&
4426 (done_buf->is_drop_reconfig == 1)) {
4427 /* When the wm is reloaded and the corresponding reg_update
4428 * fails, the buffer is reconfigured as the PING buffer; avoid
4429 * assigning NULL to it so that the next AXI_DONE or buf_done
4430 * can still succeed.
4431 */
4432 stream_info->buf[pingpong_bit] = done_buf;
4433 }
4434
4435 if (stream_info->stream_type == CONTINUOUS_STREAM ||
4436 stream_info->runtime_num_burst_capture > 1) {
4437 rc = msm_isp_cfg_ping_pong_address(
4438 stream_info, pingpong_status, NULL);
4439 if (rc < 0)
4440 ISP_DBG("%s: Error configuring ping_pong\n",
4441 __func__);
4442 } else if (done_buf && (done_buf->is_drop_reconfig != 1)) {
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05304443 int32_t frame_id_diff;
4444 /* irq_sof_id should always be >= the tasklet SOF id.
4445 * In the dual camera usecase irq_sof_id can lag behind, since the
4446 * software frame-id sync logic (epoch event) can update the slave
4447 * frame id; so bump irq_sof_id if it is behind the tasklet SOF.
4448 */
4449 if (vfe_dev->irq_sof_id < frame_id)
4450 vfe_dev->irq_sof_id = frame_id;
4451
Srikanth Uyyala03a06cb2019-03-27 14:21:07 +05304452 frame_id_diff = vfe_dev->irq_sof_id - frame_id;
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05304453 if (stream_info->controllable_output && frame_id_diff > 1) {
Ramesh Vc97c71e2019-04-17 10:50:13 +05304454 pr_err_ratelimited("%s: scheduling problem do recovery irq_sof_id %d frame_id %d\n",
4455 __func__, vfe_dev->irq_sof_id, frame_id);
Srikanth Uyyala03a06cb2019-03-27 14:21:07 +05304456 /* scheduling problem need to do recovery */
Ramesh Vc97c71e2019-04-17 10:50:13 +05304457 stream_info->buf[pingpong_bit] = done_buf;
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05304458 spin_unlock_irqrestore(&stream_info->lock, flags);
4459 msm_isp_halt_send_error(vfe_dev,
4460 ISP_EVENT_PING_PONG_MISMATCH);
4461 return;
4462 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304463 msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
4464 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304465 if (!done_buf) {
4466 if (stream_info->buf_divert) {
4467 vfe_dev->error_info.stream_framedrop_count[
4468 stream_info->bufq_handle[
4469 VFE_BUF_QUEUE_DEFAULT] & 0xFF]++;
4470 vfe_dev->error_info.framedrop_flag = 1;
Ramesh Vfb407b72019-05-10 16:42:10 +05304471 if (vfe_dev->is_split) {
4472 other_vfe_id = OTHER_VFE(vfe_dev->pdev->id);
4473 temp_dev =
4474 vfe_dev->common_data->dual_vfe_res->vfe_dev[
4475 other_vfe_id];
4476 temp_dev->error_info.stream_framedrop_count[
4477 stream_info->bufq_handle[
4478 VFE_BUF_QUEUE_DEFAULT] & 0xFF]++;
4479 temp_dev->error_info.framedrop_flag = 1;
4480 }
4481
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304482 }
4483 spin_unlock_irqrestore(&stream_info->lock, flags);
4484 return;
4485 }
4486
4487 if (stream_info->stream_type == BURST_STREAM &&
4488 stream_info->runtime_num_burst_capture) {
4489 ISP_DBG("%s: burst_frame_count: %d\n",
4490 __func__,
4491 stream_info->runtime_num_burst_capture);
4492 stream_info->runtime_num_burst_capture--;
4493 }
4494
4495 rc = msm_isp_update_deliver_count(vfe_dev, stream_info,
4496 pingpong_bit, done_buf);
4497 if (rc) {
4498 if (done_buf->is_drop_reconfig == 1)
4499 done_buf->is_drop_reconfig = 0;
4500 spin_unlock_irqrestore(&stream_info->lock, flags);
4501 pr_err_ratelimited("%s:VFE%d get done buf fail\n",
4502 __func__, vfe_dev->pdev->id);
4503 msm_isp_halt_send_error(vfe_dev,
4504 ISP_EVENT_PING_PONG_MISMATCH);
4505 return;
4506 }
4507
4509 if ((done_buf->frame_id != frame_id) &&
4510 vfe_dev->axi_data.enable_frameid_recovery) {
4511 if (done_buf->is_drop_reconfig == 1)
4512 done_buf->is_drop_reconfig = 0;
4513 spin_unlock_irqrestore(&stream_info->lock, flags);
4514 msm_isp_handle_done_buf_frame_id_mismatch(vfe_dev,
4515 stream_info, done_buf, time_stamp, frame_id);
4516 return;
4517 }
4518
4519 if (done_buf->is_drop_reconfig == 1) {
4520 /* The ping/pong buf has already been reconfigured,
4521 * so don't issue buf_done for the current buffer.
4522 */
4523 done_buf->is_drop_reconfig = 0;
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05304524 if (!stream_info->buf[pingpong_bit]) {
Srikanth Uyyala03a06cb2019-03-27 14:21:07 +05304525 /* the same buffer was not re-programmed, so write the scratch address */
Srikanth Uyyalaf7a82542019-03-22 10:41:46 +05304526 msm_isp_cfg_stream_scratch(stream_info,
4527 pingpong_status);
4528 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304529 spin_unlock_irqrestore(&stream_info->lock, flags);
4530 } else {
Lokesh Kumar Aakulu2a8a9932019-03-11 17:27:09 +05304531 /* If there is no reg_update from userspace, don't free the
4532 * buffer immediately; delegate it to RegUpdateAck.
4533 */
4534 if (stream_info->controllable_output &&
4535 !(vfe_dev->reg_update_requested &
4536 BIT((uint32_t)VFE_PIX_0))) {
4537 stream_info->pending_buf_info.is_buf_done_pending = 1;
4538 stream_info->pending_buf_info.buf = done_buf;
4539 stream_info->pending_buf_info.frame_id = frame_id;
4540 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304541 spin_unlock_irqrestore(&stream_info->lock, flags);
Lokesh Kumar Aakulu2a8a9932019-03-11 17:27:09 +05304542 if (stream_info->pending_buf_info.is_buf_done_pending != 1) {
4543 msm_isp_process_done_buf(vfe_dev, stream_info,
4544 done_buf, time_stamp, frame_id);
4545 }
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304546 }
4547}
4548
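/*
 * msm_isp_process_axi_irq() - Fan the composite and wm irq masks out to the
 * owning streams; orphaned write masters are pointed at the scratch buffer
 * so the hardware always has a valid address to write to.
 */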
4549void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
4550 uint32_t irq_status0, uint32_t irq_status1,
Srikanth Uyyala0ec73a92018-03-12 18:44:40 +05304551 uint32_t pingpong_status, struct msm_isp_timestamp *ts)
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304552{
4553 int i, rc = 0;
4554 uint32_t comp_mask = 0, wm_mask = 0;
Srikanth Uyyala0ec73a92018-03-12 18:44:40 +05304555 uint32_t stream_idx;
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304556 struct msm_vfe_axi_stream *stream_info;
4557 struct msm_vfe_axi_composite_info *comp_info;
4558 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
4559 int wm;
4560
4561 comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
4562 get_comp_mask(irq_status0, irq_status1);
4563 wm_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
4564 get_wm_mask(irq_status0, irq_status1);
4565 if (!(comp_mask || wm_mask))
4566 return;
4567
4568 ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
Pratap Nirujogi6e759912018-01-17 17:51:17 +05304569
4570 for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
4571 rc = 0;
4572 comp_info = &axi_data->composite_info[i];
4573 wm_mask &= ~(comp_info->stream_composite_mask);
4574 if (comp_mask & (1 << i)) {
4575 stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
4576 if ((!comp_info->stream_handle) ||
4577 (stream_idx >= VFE_AXI_SRC_MAX)) {
4578 pr_err_ratelimited("%s: Invalid handle for composite irq\n",
4579 __func__);
4580 for (wm = 0; wm < axi_data->hw_info->num_wm;
4581 wm++)
4582 if (comp_info->stream_composite_mask &
4583 (1 << wm))
4584 msm_isp_cfg_wm_scratch(vfe_dev,
4585 wm, pingpong_status);
4586 continue;
4587 }
4588 stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
4589 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4590 stream_idx);
4591
4592 msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
4593 pingpong_status, ts);
4594
4595 }
4596 }
4597
4598 for (i = 0; i < axi_data->hw_info->num_wm; i++) {
4599 if (wm_mask & (1 << i)) {
4600 stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
4601 if ((!axi_data->free_wm[i]) ||
4602 (stream_idx >= VFE_AXI_SRC_MAX)) {
4603 pr_err("%s: Invalid handle for wm irq\n",
4604 __func__);
4605 msm_isp_cfg_wm_scratch(vfe_dev, i,
4606 pingpong_status);
4607 continue;
4608 }
4609 stream_info = msm_isp_get_stream_common_data(vfe_dev,
4610 stream_idx);
4611
4612 msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
4613 pingpong_status, ts);
4614 }
4615 }
4616}
4617
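/*
 * msm_isp_axi_disable_all_wm() - Disable every write master of all ACTIVE
 * streams on this vfe (typically used on the halt/error path).
 */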
4618void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
4619{
4620 struct msm_vfe_axi_stream *stream_info;
4621 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
4622 int i, j;
4623 int vfe_idx;
4624
4625 if (!vfe_dev || !axi_data) {
4626 pr_err("%s: error %pK %pK\n", __func__, vfe_dev, axi_data);
4627 return;
4628 }
4629
4630 for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
4631 stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
4632
4633 if (stream_info->state != ACTIVE)
4634 continue;
4635
4636 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
4637 stream_info);
4638 for (j = 0; j < stream_info->num_planes; j++)
4639 vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
4640 vfe_dev->vfe_base,
4641 stream_info->wm[vfe_idx][j], 0);
4642 }
4643}