blob: de7d9edccf5f1e645400427ab83976a84eed7b47 [file] [log] [blame]
Pratap Nirujogi6e759912018-01-17 17:51:17 +05301/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/ratelimit.h>
15#include <asm/div64.h>
16#include "msm_isp40.h"
17#include "msm_isp_util.h"
18#include "msm_isp_axi_util.h"
19#include "msm_isp_stats_util.h"
20#include "msm_isp.h"
21#include "msm.h"
22#include "msm_camera_io_util.h"
23#include "msm_isp47.h"
24#include "linux/iopoll.h"
25
26#undef CDBG
27#define CDBG(fmt, args...) pr_debug(fmt, ##args)
28
29#define VFE40_BURST_LEN 1
30#define VFE40_BURST_LEN_8916_VERSION 2
31#define VFE40_BURST_LEN_8952_VERSION 3
32#define VFE40_WM_BIT_SHIFT 4
33#define VFE40_WM_BIT_SHIFT_8976_VERSION 3
34#define VFE40_STATS_BURST_LEN 1
35#define VFE40_STATS_BURST_LEN_8916_VERSION 2
36#define VFE40_FETCH_BURST_LEN 3
37#define VFE40_UB_SIZE 1536 /* 1536 * 128 bits = 24KB */
38#define VFE40_UB_SIZE_8952 2048 /* 2048 * 128 bits = 32KB */
39#define VFE40_UB_SIZE_8916 3072 /* 3072 * 128 bits = 48KB */
40#define VFE40_EQUAL_SLICE_UB 190 /* (UB_SIZE - STATS SIZE)/6 */
41#define VFE40_EQUAL_SLICE_UB_8916 236
42#define VFE40_TOTAL_WM_UB 1144 /* UB_SIZE - STATS SIZE */
43#define VFE40_TOTAL_WM_UB_8916 1656
44#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
45#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
46#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
47#define VFE40_XBAR_SHIFT(idx) ((idx%2) ? 16 : 0)
48#define VFE40_PING_PONG_BASE(wm, ping_pong) \
49 (VFE40_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
50
51#define VFE40_BUS_RD_CGC_OVERRIDE_BIT 16
52
53#define STATS_IDX_BE 0
54#define STATS_IDX_BG 1
55#define STATS_IDX_BF 2
56#define STATS_IDX_AWB 3
57#define STATS_IDX_RS 4
58#define STATS_IDX_CS 5
59#define STATS_IDX_IHIST 6
60#define STATS_IDX_BHIST 7
61
62static uint8_t stats_pingpong_offset_map[] = {
63 8, 9, 10, 11, 12, 13, 14, 15};
64
65#define VFE40_NUM_STATS_TYPE 8
66#define VFE40_STATS_BASE(idx) (0x168 + 0x18 * idx)
67#define VFE40_STATS_PING_PONG_BASE(idx, ping_pong) \
68 (VFE40_STATS_BASE(idx) + 0x4 * \
69 (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1))
70
71#define VFE40_VBIF_CLKON 0x4
72#define VFE40_VBIF_IN_RD_LIM_CONF0 0xB0
73#define VFE40_VBIF_IN_RD_LIM_CONF1 0xB4
74#define VFE40_VBIF_IN_RD_LIM_CONF2 0xB8
75#define VFE40_VBIF_IN_WR_LIM_CONF0 0xC0
76#define VFE40_VBIF_IN_WR_LIM_CONF1 0xC4
77#define VFE40_VBIF_IN_WR_LIM_CONF2 0xC8
78#define VFE40_VBIF_OUT_RD_LIM_CONF0 0xD0
79#define VFE40_VBIF_OUT_WR_LIM_CONF0 0xD4
80#define VFE40_VBIF_DDR_OUT_MAX_BURST 0xD8
81#define VFE40_VBIF_OCMEM_OUT_MAX_BURST 0xDC
82#define VFE40_VBIF_ARB_CTL 0xF0
83#define VFE40_VBIF_ROUND_ROBIN_QOS_ARB 0x124
84#define VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x160
85#define VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF1 0x164
86#define VFE40_VBIF_OUT_AXI_AOOO_EN 0x178
87#define VFE40_VBIF_OUT_AXI_AOOO 0x17C
88
89#define VFE40_BUS_BDG_QOS_CFG_0 0x000002C4
90#define VFE40_BUS_BDG_QOS_CFG_1 0x000002C8
91#define VFE40_BUS_BDG_QOS_CFG_2 0x000002CC
92#define VFE40_BUS_BDG_QOS_CFG_3 0x000002D0
93#define VFE40_BUS_BDG_QOS_CFG_4 0x000002D4
94#define VFE40_BUS_BDG_QOS_CFG_5 0x000002D8
95#define VFE40_BUS_BDG_QOS_CFG_6 0x000002DC
96#define VFE40_BUS_BDG_QOS_CFG_7 0x000002E0
97
98#define VFE40_CLK_IDX 2
99
100static uint32_t msm_vfe40_ub_reg_offset(struct vfe_device *vfe_dev, int idx)
101{
102 return (VFE40_WM_BASE(idx) + 0x10);
103}
104
105static uint32_t msm_vfe40_get_ub_size(struct vfe_device *vfe_dev)
106{
107 if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION) {
108 vfe_dev->ub_info->wm_ub = VFE40_TOTAL_WM_UB_8916;
109 return VFE40_TOTAL_WM_UB_8916;
110 }
111 return VFE40_TOTAL_WM_UB;
112}
113
114static void msm_vfe40_config_irq(struct vfe_device *vfe_dev,
115 uint32_t irq0_mask, uint32_t irq1_mask,
116 enum msm_isp_irq_operation oper)
117{
118 switch (oper) {
119 case MSM_ISP_IRQ_ENABLE:
120 vfe_dev->irq0_mask |= irq0_mask;
121 vfe_dev->irq1_mask |= irq1_mask;
122 msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x30);
123 msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x34);
124 msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
125 break;
126 case MSM_ISP_IRQ_DISABLE:
127 vfe_dev->irq0_mask &= ~irq0_mask;
128 vfe_dev->irq1_mask &= ~irq1_mask;
129 break;
130 case MSM_ISP_IRQ_SET:
131 vfe_dev->irq0_mask = irq0_mask;
132 vfe_dev->irq1_mask = irq1_mask;
133 msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x30);
134 msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x34);
135 msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
136 }
137 msm_camera_io_w_mb(vfe_dev->irq0_mask, vfe_dev->vfe_base + 0x28);
138 msm_camera_io_w_mb(vfe_dev->irq1_mask, vfe_dev->vfe_base + 0x2C);
139}
140
141static int32_t msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev,
142 struct msm_vfe_hw_init_parms *qos_parms,
143 struct msm_vfe_hw_init_parms *ds_parms)
144{
145 void __iomem *vfebase = vfe_dev->vfe_base;
146 struct device_node *of_node;
147 uint32_t *ds_settings = NULL, *ds_regs = NULL, ds_entries = 0;
148 int32_t i = 0, rc = 0;
149 uint32_t *qos_settings = NULL, *qos_regs = NULL, qos_entries = 0;
150
151 of_node = vfe_dev->pdev->dev.of_node;
152
153 rc = of_property_read_u32(of_node, qos_parms->entries,
154 &qos_entries);
155 if (rc < 0 || !qos_entries) {
156 pr_err("%s: NO QOS entries found\n", __func__);
157 } else {
158 qos_settings = kcalloc(qos_entries, sizeof(uint32_t),
159 GFP_KERNEL);
160 if (!qos_settings)
161 return -ENOMEM;
162 qos_regs = kcalloc(qos_entries, sizeof(uint32_t),
163 GFP_KERNEL);
164 if (!qos_regs) {
165 kfree(qos_settings);
166 return -ENOMEM;
167 }
168 rc = of_property_read_u32_array(of_node, qos_parms->regs,
169 qos_regs, qos_entries);
170 if (rc < 0) {
171 pr_err("%s: NO QOS BUS BDG info\n", __func__);
172 kfree(qos_settings);
173 kfree(qos_regs);
174 } else {
175 if (qos_parms->settings) {
176 rc = of_property_read_u32_array(of_node,
177 qos_parms->settings,
178 qos_settings, qos_entries);
179 if (rc < 0) {
180 pr_err("%s: NO QOS settings\n",
181 __func__);
182 kfree(qos_settings);
183 kfree(qos_regs);
184 } else {
185 for (i = 0; i < qos_entries; i++)
186 msm_camera_io_w(qos_settings[i],
187 vfebase + qos_regs[i]);
188 kfree(qos_settings);
189 kfree(qos_regs);
190 }
191 } else {
192 kfree(qos_settings);
193 kfree(qos_regs);
194 }
195 }
196 }
197 rc = of_property_read_u32(of_node, ds_parms->entries,
198 &ds_entries);
199 if (rc < 0 || !ds_entries) {
200 pr_err("%s: NO D/S entries found\n", __func__);
201 } else {
202 ds_settings = kcalloc(qos_entries, sizeof(uint32_t),
203 GFP_KERNEL);
204 if (!ds_settings)
205 return -ENOMEM;
206 ds_regs = kcalloc(ds_entries, sizeof(uint32_t),
207 GFP_KERNEL);
208 if (!ds_regs) {
209 kfree(ds_settings);
210 return -ENOMEM;
211 }
212 rc = of_property_read_u32_array(of_node, ds_parms->regs,
213 ds_regs, ds_entries);
214 if (rc < 0) {
215 pr_err("%s: NO D/S register info\n", __func__);
216 kfree(ds_settings);
217 kfree(ds_regs);
218 } else {
219 if (ds_parms->settings) {
220 rc = of_property_read_u32_array(of_node,
221 ds_parms->settings, ds_settings,
222 ds_entries);
223 if (rc < 0) {
224 pr_err("%s: NO D/S settings\n",
225 __func__);
226 kfree(ds_settings);
227 kfree(ds_regs);
228 } else {
229 for (i = 0; i < ds_entries; i++)
230 msm_camera_io_w(ds_settings[i],
231 vfebase + ds_regs[i]);
232 kfree(ds_regs);
233 kfree(ds_settings);
234 }
235 } else {
236 kfree(ds_regs);
237 kfree(ds_settings);
238 }
239 }
240 }
241 return 0;
242}
243
244static int32_t msm_vfe40_init_vbif_parms(struct vfe_device *vfe_dev,
245 struct msm_vfe_hw_init_parms *vbif_parms)
246{
247 void __iomem *vfe_vbif_base = vfe_dev->vfe_vbif_base;
248 struct device_node *of_node;
249 int32_t i = 0, rc = 0;
250 uint32_t *vbif_settings = NULL, *vbif_regs = NULL, vbif_entries = 0;
251
252 of_node = vfe_dev->pdev->dev.of_node;
253
254 rc = of_property_read_u32(of_node, vbif_parms->entries,
255 &vbif_entries);
256 if (rc < 0 || !vbif_entries) {
257 pr_err("%s: NO VBIF entries found\n", __func__);
258 } else {
259 vbif_settings = kcalloc(vbif_entries, sizeof(uint32_t),
260 GFP_KERNEL);
261 if (!vbif_settings)
262 return -ENOMEM;
263 vbif_regs = kcalloc(vbif_entries, sizeof(uint32_t),
264 GFP_KERNEL);
265 if (!vbif_regs) {
266 kfree(vbif_settings);
267 return -ENOMEM;
268 }
269 rc = of_property_read_u32_array(of_node, vbif_parms->regs,
270 vbif_regs, vbif_entries);
271 if (rc < 0) {
272 pr_err("%s: NO VBIF info\n", __func__);
273 kfree(vbif_settings);
274 kfree(vbif_regs);
275 } else {
276 rc = of_property_read_u32_array(of_node,
277 vbif_parms->settings,
278 vbif_settings, vbif_entries);
279 if (rc < 0) {
280 pr_err("%s: NO VBIF settings\n",
281 __func__);
282 kfree(vbif_settings);
283 kfree(vbif_regs);
284 } else {
285 for (i = 0; i < vbif_entries; i++)
286 msm_camera_io_w(
287 vbif_settings[i],
288 vfe_vbif_base + vbif_regs[i]);
289 kfree(vbif_settings);
290 kfree(vbif_regs);
291 }
292 }
293 }
294 return 0;
295}
296
297static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
298{
299 struct msm_vfe_hw_init_parms qos_parms;
300 struct msm_vfe_hw_init_parms vbif_parms;
301 struct msm_vfe_hw_init_parms ds_parms;
302
303 qos_parms.entries = "qos-entries";
304 qos_parms.regs = "qos-regs";
305 qos_parms.settings = "qos-settings";
306 vbif_parms.entries = "vbif-entries";
307 vbif_parms.regs = "vbif-regs";
308 vbif_parms.settings = "vbif-settings";
309 ds_parms.entries = "ds-entries";
310 ds_parms.regs = "ds-regs";
311 ds_parms.settings = "ds-settings";
312
313 switch (vfe_dev->vfe_hw_version) {
314 case VFE40_8974V1_VERSION:
315 case VFE40_8x26_VERSION:
316 case VFE40_8916_VERSION:
317 case VFE40_8939_VERSION:
318 break;
319 case VFE40_8x26V2_VERSION:
320 qos_parms.settings = "qos-v2-settings";
321 break;
322 case VFE40_8974V2_VERSION:
323 case VFE40_8974V3_VERSION:
324 if (vfe_dev->vfe_hw_version == VFE40_8974V2_VERSION)
325 qos_parms.settings = "qos-v2-settings";
326 else
327 qos_parms.settings = "qos-v3-settings";
328 vbif_parms.entries = "vbif-v2-entries";
329 vbif_parms.regs = "vbif-v2-regs";
330 vbif_parms.settings = "vbif-v2-settings";
331 break;
332 case VFE40_8937_VERSION:
333 case VFE40_8953_VERSION:
334 case VFE40_8917_VERSION:
335 default:
336 ISP_DBG("%s: No special QOS\n", __func__);
337 }
338
339 msm_vfe40_init_qos_parms(vfe_dev, &qos_parms, &ds_parms);
340 msm_vfe40_init_vbif_parms(vfe_dev, &vbif_parms);
341 /* BUS_CFG */
342 msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
343 msm_vfe40_config_irq(vfe_dev, 0x800000E0, 0xFEFFFF7E,
344 MSM_ISP_IRQ_ENABLE);
345}
346
347static void msm_vfe40_clear_status_reg(struct vfe_device *vfe_dev)
348{
349 vfe_dev->irq0_mask = (1 << 31);
350 vfe_dev->irq1_mask = 0;
351 msm_vfe40_config_irq(vfe_dev, (1 << 31), 0,
352 MSM_ISP_IRQ_SET);
353 msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
354 msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
355 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
356}
357
358static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev,
359 uint32_t irq_status0, uint32_t irq_status1)
360{
361 if (irq_status0 & (1 << 31))
362 complete(&vfe_dev->reset_complete);
363}
364
365static void msm_vfe40_process_halt_irq(struct vfe_device *vfe_dev,
366 uint32_t irq_status0, uint32_t irq_status1)
367{
368 if (irq_status1 & (1 << 8)) {
369 complete(&vfe_dev->halt_complete);
370 msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0);
371 }
372}
373
374static void msm_vfe40_process_input_irq(struct vfe_device *vfe_dev,
375 uint32_t irq_status0, uint32_t irq_status1,
376 struct msm_isp_timestamp *ts)
377{
378 if (!(irq_status0 & 0x1000003))
379 return;
380
381 if (irq_status0 & (1 << 0)) {
382 ISP_DBG("%s: SOF IRQ\n", __func__);
383 msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
384 }
385
386 if (irq_status0 & (1 << 24)) {
387 ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
388 msm_isp_fetch_engine_done_notify(vfe_dev,
389 &vfe_dev->fetch_engine_info);
390 }
391
392 if (irq_status0 & (1 << 1))
393 ISP_DBG("%s: EOF IRQ\n", __func__);
394}
395
396static void msm_vfe40_process_violation_status(
397 struct vfe_device *vfe_dev)
398{
399 uint32_t violation_status = vfe_dev->error_info.violation_status;
400
401 if (!violation_status)
402 return;
403
404 if (violation_status & (1 << 0))
405 pr_err("%s: vfe %d camif violation\n", __func__,
406 vfe_dev->pdev->id);
407 if (violation_status & (1 << 1))
408 pr_err("%s: vfe %d black violation\n", __func__,
409 vfe_dev->pdev->id);
410 if (violation_status & (1 << 2))
411 pr_err("%s: vfe %d rolloff violation\n", __func__,
412 vfe_dev->pdev->id);
413 if (violation_status & (1 << 3))
414 pr_err("%s: demux violation\n", __func__);
415 if (violation_status & (1 << 4))
416 pr_err("%s: demosaic violation\n", __func__);
417 if (violation_status & (1 << 5))
418 pr_err("%s: wb violation\n", __func__);
419 if (violation_status & (1 << 6))
420 pr_err("%s: clf violation\n", __func__);
421 if (violation_status & (1 << 7))
422 pr_err("%s: color correct violation\n", __func__);
423 if (violation_status & (1 << 8))
424 pr_err("%s: rgb lut violation\n", __func__);
425 if (violation_status & (1 << 9))
426 pr_err("%s: la violation\n", __func__);
427 if (violation_status & (1 << 10))
428 pr_err("%s: chroma enhance violation\n", __func__);
429 if (violation_status & (1 << 11))
430 pr_err("%s: chroma suppress mce violation\n", __func__);
431 if (violation_status & (1 << 12))
432 pr_err("%s: skin enhance violation\n", __func__);
433 if (violation_status & (1 << 13))
434 pr_err("%s: color tranform enc violation\n", __func__);
435 if (violation_status & (1 << 14))
436 pr_err("%s: color tranform view violation\n", __func__);
437 if (violation_status & (1 << 15))
438 pr_err("%s: scale enc y violation\n", __func__);
439 if (violation_status & (1 << 16))
440 pr_err("%s: scale enc cbcr violation\n", __func__);
441 if (violation_status & (1 << 17))
442 pr_err("%s: scale view y violation\n", __func__);
443 if (violation_status & (1 << 18))
444 pr_err("%s: scale view cbcr violation\n", __func__);
445 if (violation_status & (1 << 19))
446 pr_err("%s: asf enc violation\n", __func__);
447 if (violation_status & (1 << 20))
448 pr_err("%s: asf view violation\n", __func__);
449 if (violation_status & (1 << 21))
450 pr_err("%s: crop enc y violation\n", __func__);
451 if (violation_status & (1 << 22))
452 pr_err("%s: crop enc cbcr violation\n", __func__);
453 if (violation_status & (1 << 23))
454 pr_err("%s: crop view y violation\n", __func__);
455 if (violation_status & (1 << 24))
456 pr_err("%s: crop view cbcr violation\n", __func__);
457 if (violation_status & (1 << 25))
458 pr_err("%s: realign buf y violation\n", __func__);
459 if (violation_status & (1 << 26))
460 pr_err("%s: realign buf cb violation\n", __func__);
461 if (violation_status & (1 << 27))
462 pr_err("%s: realign buf cr violation\n", __func__);
463}
464
465static void msm_vfe40_process_error_status(struct vfe_device *vfe_dev)
466{
467 uint32_t error_status1 = vfe_dev->error_info.error_mask1;
468
469 if (error_status1 & (1 << 0)) {
470 pr_err_ratelimited("%s: vfe %d camif error status: 0x%x\n",
471 __func__, vfe_dev->pdev->id,
472 vfe_dev->error_info.camif_status);
473 msm_camera_io_dump(vfe_dev->vfe_base + 0x2F4, 0x30, 1);
474 }
475 if (error_status1 & (1 << 1))
476 pr_err_ratelimited("%s: stats bhist overwrite\n", __func__);
477 if (error_status1 & (1 << 2))
478 pr_err_ratelimited("%s: stats cs overwrite\n", __func__);
479 if (error_status1 & (1 << 3))
480 pr_err_ratelimited("%s: stats ihist overwrite\n", __func__);
481 if (error_status1 & (1 << 4))
482 pr_err_ratelimited("%s: realign buf y overflow\n", __func__);
483 if (error_status1 & (1 << 5))
484 pr_err_ratelimited("%s: realign buf cb overflow\n", __func__);
485 if (error_status1 & (1 << 6))
486 pr_err_ratelimited("%s: realign buf cr overflow\n", __func__);
487 if (error_status1 & (1 << 7))
488 msm_vfe40_process_violation_status(vfe_dev);
489 if (error_status1 & (1 << 9)) {
490 vfe_dev->stats->imagemaster0_overflow++;
491 pr_err_ratelimited("%s: image master 0 bus overflow\n",
492 __func__);
493 }
494 if (error_status1 & (1 << 10)) {
495 vfe_dev->stats->imagemaster1_overflow++;
496 pr_err_ratelimited("%s: image master 1 bus overflow\n",
497 __func__);
498 }
499 if (error_status1 & (1 << 11)) {
500 vfe_dev->stats->imagemaster2_overflow++;
501 pr_err_ratelimited("%s: image master 2 bus overflow\n",
502 __func__);
503 }
504 if (error_status1 & (1 << 12)) {
505 vfe_dev->stats->imagemaster3_overflow++;
506 pr_err_ratelimited("%s: image master 3 bus overflow\n",
507 __func__);
508 }
509 if (error_status1 & (1 << 13)) {
510 vfe_dev->stats->imagemaster4_overflow++;
511 pr_err_ratelimited("%s: image master 4 bus overflow\n",
512 __func__);
513 }
514 if (error_status1 & (1 << 14)) {
515 vfe_dev->stats->imagemaster5_overflow++;
516 pr_err_ratelimited("%s: image master 5 bus overflow\n",
517 __func__);
518 }
519 if (error_status1 & (1 << 15)) {
520 vfe_dev->stats->imagemaster6_overflow++;
521 pr_err_ratelimited("%s: image master 6 bus overflow\n",
522 __func__);
523 }
524 if (error_status1 & (1 << 16)) {
525 vfe_dev->stats->be_overflow++;
526 pr_err_ratelimited("%s: status be bus overflow\n", __func__);
527 }
528 if (error_status1 & (1 << 17)) {
529 vfe_dev->stats->bg_overflow++;
530 pr_err_ratelimited("%s: status bg bus overflow\n", __func__);
531 }
532 if (error_status1 & (1 << 18)) {
533 vfe_dev->stats->bf_overflow++;
534 pr_err_ratelimited("%s: status bf bus overflow\n", __func__);
535 }
536 if (error_status1 & (1 << 19)) {
537 vfe_dev->stats->awb_overflow++;
538 pr_err_ratelimited("%s: status awb bus overflow\n", __func__);
539 }
540 if (error_status1 & (1 << 20)) {
541 vfe_dev->stats->rs_overflow++;
542 pr_err_ratelimited("%s: status rs bus overflow\n", __func__);
543 }
544 if (error_status1 & (1 << 21)) {
545 vfe_dev->stats->cs_overflow++;
546 pr_err_ratelimited("%s: status cs bus overflow\n", __func__);
547 }
548 if (error_status1 & (1 << 22)) {
549 vfe_dev->stats->ihist_overflow++;
550 pr_err_ratelimited("%s: status ihist bus overflow\n", __func__);
551 }
552 if (error_status1 & (1 << 23)) {
553 vfe_dev->stats->skinbhist_overflow++;
554 pr_err_ratelimited("%s: status skin bhist bus overflow\n",
555 __func__);
556 }
557
558 /* Update ab/ib values for any overflow that may have occurred*/
559 if ((error_status1 >> 9) & 0x7FFF)
560 msm_isp_update_last_overflow_ab_ib(vfe_dev);
561}
562
563static void msm_vfe40_read_and_clear_irq_status(struct vfe_device *vfe_dev,
564 uint32_t *irq_status0, uint32_t *irq_status1)
565{
566 *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x38);
567 *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
568 /*
569 * Ignore composite 2/3 irq which is used for dual VFE only
570 */
571 if (*irq_status0 & 0x6000000)
572 *irq_status0 &= ~(0x18000000);
573 msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x30);
574 msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x34);
575 msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
576 if (*irq_status0 & 0x18000000) {
577 pr_err_ratelimited("%s: Protection triggered\n", __func__);
578 *irq_status0 &= ~(0x18000000);
579 }
580
581 *irq_status0 &= vfe_dev->irq0_mask;
582 *irq_status1 &= vfe_dev->irq1_mask;
583
584 if (*irq_status1 & (1 << 0)) {
585 vfe_dev->error_info.camif_status =
586 msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
587 msm_vfe40_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
588 }
589
590 if (*irq_status1 & (1 << 7))
591 vfe_dev->error_info.violation_status |=
592 msm_camera_io_r(vfe_dev->vfe_base + 0x48);
593
594}
595
596static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
597 uint32_t *irq_status0, uint32_t *irq_status1)
598{
599 *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x38);
600 *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
601}
602
603static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
604 uint32_t irq_status0, uint32_t irq_status1,
605 struct msm_isp_timestamp *ts)
606{
607 enum msm_vfe_input_src i;
608 uint32_t shift_irq;
609 uint8_t reg_updated = 0;
610 unsigned long flags;
611
612 if (!(irq_status0 & 0xF0))
613 return;
614 /* Shift status bits so that PIX REG UPDATE is 1st bit */
615 shift_irq = ((irq_status0 & 0xF0) >> 4);
616 for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
617 if (shift_irq & BIT(i)) {
618 reg_updated |= BIT(i);
619 ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
620 (uint32_t)BIT(i));
621 switch (i) {
622 case VFE_PIX_0:
623 msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
624 VFE_PIX_0, ts);
625 msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
626 MSM_ISP_COMP_IRQ_REG_UPD, ts);
627 msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
628 MSM_ISP_COMP_IRQ_REG_UPD);
629 if (vfe_dev->axi_data.src_info[i].stream_count
630 == 0 &&
631 vfe_dev->axi_data.src_info[i].
632 raw_stream_count == 0 &&
633 vfe_dev->axi_data.src_info[i].active)
634 vfe_dev->hw_info->vfe_ops.core_ops.
635 reg_update(vfe_dev, i);
636 break;
637 case VFE_RAW_0:
638 case VFE_RAW_1:
639 case VFE_RAW_2:
640 msm_isp_increment_frame_id(vfe_dev, i, ts);
641 msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
642 msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
643 MSM_ISP_COMP_IRQ_REG_UPD, ts);
644 /*
645 * Reg Update is pseudo SOF for RDI,
646 * so request every frame
647 */
648 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
649 vfe_dev, i);
650 /* reg upd is also epoch for RDI */
651 msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
652 MSM_ISP_COMP_IRQ_EPOCH, ts);
653 break;
654 default:
655 pr_err("%s: Error case\n", __func__);
656 return;
657 }
658 }
659 }
660
661 spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
662 if (reg_updated & BIT(VFE_PIX_0))
663 vfe_dev->reg_updated = 1;
664
665 vfe_dev->reg_update_requested &= ~reg_updated;
666 spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
667}
668
669static void msm_vfe40_reg_update(struct vfe_device *vfe_dev,
670 enum msm_vfe_input_src frame_src)
671{
672 uint32_t update_mask = 0;
673 unsigned long flags;
674
675 /* This HW supports upto VFE_RAW_2 */
676 if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
677 pr_err("%s Error case\n", __func__);
678 return;
679 }
680
681 /*
682 * If frame_src == VFE_SRC_MAX request reg_update on all
683 * supported INTF
684 */
685 if (frame_src == VFE_SRC_MAX)
686 update_mask = 0xF;
687 else
688 update_mask = BIT((uint32_t)frame_src);
689 ISP_DBG("%s update_mask %x\n", __func__, update_mask);
690
691 spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
692 vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
693 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
694 vfe_dev->reg_update_requested |= update_mask;
695 vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
696 vfe_dev->reg_update_requested;
697 if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
698 ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
699 if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) {
700 pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__);
701 spin_unlock_irqrestore(&vfe_dev->reg_update_lock,
702 flags);
703 return;
704 }
705 msm_camera_io_w_mb(update_mask,
706 vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
707 + 0x378);
708 msm_camera_io_w_mb(update_mask,
709 vfe_dev->vfe_base + 0x378);
710 } else if (!vfe_dev->is_split ||
711 ((frame_src == VFE_PIX_0) &&
712 (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
713 (vfe_dev->axi_data.src_info[VFE_PIX_0].
714 raw_stream_count == 0)) ||
715 (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
716 msm_camera_io_w_mb(update_mask,
717 vfe_dev->vfe_base + 0x378);
718 }
719 spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
720}
721
722static void msm_vfe40_process_epoch_irq(struct vfe_device *vfe_dev,
723 uint32_t irq_status0, uint32_t irq_status1,
724 struct msm_isp_timestamp *ts)
725{
726 if (!(irq_status0 & 0xc))
727 return;
728
729 if (irq_status0 & BIT(2)) {
730 msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
731 ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
732 msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
733 MSM_ISP_COMP_IRQ_EPOCH, ts);
734 msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
735 MSM_ISP_COMP_IRQ_EPOCH);
736 msm_isp_update_error_frame_count(vfe_dev);
737 if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
738 && vfe_dev->axi_data.src_info[VFE_PIX_0].
739 stream_count == 0) {
740 ISP_DBG("%s: SOF IRQ\n", __func__);
741 msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
742 msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
743 MSM_ISP_COMP_IRQ_REG_UPD, ts);
744 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
745 vfe_dev, VFE_PIX_0);
746 }
747 }
748}
749
750static long msm_vfe40_reset_hardware(struct vfe_device *vfe_dev,
751 uint32_t first_start, uint32_t blocking_call)
752{
753 long rc = 0;
754
755 init_completion(&vfe_dev->reset_complete);
756
757 if (first_start) {
758 msm_camera_io_w_mb(0x1FF, vfe_dev->vfe_base + 0xC);
759 } else {
760 msm_camera_io_w_mb(0x1EF, vfe_dev->vfe_base + 0xC);
761 msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
762 msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
763 msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
764 vfe_dev->hw_info->vfe_ops.axi_ops.
765 reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0003FFFF);
766 }
767
768
769 if (blocking_call) {
770 rc = wait_for_completion_timeout(
771 &vfe_dev->reset_complete, msecs_to_jiffies(50));
772 }
773 return rc;
774}
775
776static void msm_vfe40_axi_reload_wm(struct vfe_device *vfe_dev,
777 void __iomem *vfe_base, uint32_t reload_mask)
778{
779 msm_camera_io_w_mb(reload_mask, vfe_base + 0x4C);
780}
781
782static void msm_vfe40_axi_update_cgc_override(struct vfe_device *vfe_dev,
783 uint8_t wm_idx, uint8_t enable)
784{
785 uint32_t val;
786
787 /* Change CGC override */
788 val = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
789 if (enable)
790 val |= (1 << wm_idx);
791 else
792 val &= ~(1 << wm_idx);
793 msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x974);
794}
795
796static void msm_vfe40_axi_enable_wm(void __iomem *vfe_base,
797 uint8_t wm_idx, uint8_t enable)
798{
799 uint32_t val;
800
801 val = msm_camera_io_r(vfe_base + VFE40_WM_BASE(wm_idx));
802 if (enable)
803 val |= 0x1;
804 else
805 val &= ~0x1;
806 msm_camera_io_w_mb(val,
807 vfe_base + VFE40_WM_BASE(wm_idx));
808}
809
810static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
811 struct msm_vfe_axi_stream *stream_info)
812{
813 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
814 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
815 uint32_t comp_mask, comp_mask_index;
816
817 comp_mask_index = stream_info->comp_mask_index[vfe_idx];
818
819 comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
820 comp_mask &= ~(0x7F << (comp_mask_index * 8));
821 comp_mask |= (axi_data->composite_info[comp_mask_index].
822 stream_composite_mask << (comp_mask_index * 8));
823
824 msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
825 msm_vfe40_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
826 MSM_ISP_IRQ_ENABLE);
827}
828
829static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
830 struct msm_vfe_axi_stream *stream_info)
831{
832 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
833 uint32_t comp_mask, comp_mask_index;
834
835 comp_mask_index = stream_info->comp_mask_index[vfe_idx];
836 vfe_dev->irq0_mask &= ~BIT(27);
837
838 comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
839 comp_mask &= ~(0x7F << (comp_mask_index * 8));
840
841 msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
842 msm_vfe40_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
843 MSM_ISP_IRQ_DISABLE);
844}
845
846static void msm_vfe40_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
847 struct msm_vfe_axi_stream *stream_info)
848{
849 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
850
851 msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
852 MSM_ISP_IRQ_ENABLE);
853}
854
855static void msm_vfe40_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
856 struct msm_vfe_axi_stream *stream_info)
857{
858 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
859
860 vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[vfe_idx][0] + 8));
861 msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
862 0, MSM_ISP_IRQ_DISABLE);
863}
864
865static void msm_vfe40_cfg_framedrop(struct vfe_device *vfe_dev,
866 struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
867 uint32_t framedrop_period)
868{
869 void __iomem *vfe_base = vfe_dev->vfe_base;
870 uint32_t i, temp;
871 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
872
873 for (i = 0; i < stream_info->num_planes; i++) {
874 msm_camera_io_w(framedrop_pattern, vfe_base +
875 VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
876 temp = msm_camera_io_r(vfe_base +
877 VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
878 temp &= 0xFFFFFF83;
879 msm_camera_io_w(temp | (framedrop_period - 1) << 2,
880 vfe_base + VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
881 }
882
883 msm_camera_io_w_mb(0x1, vfe_base + 0x378);
884}
885
886static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
887 struct msm_vfe_axi_stream *stream_info)
888{
889 uint32_t i;
890 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
891
892 for (i = 0; i < stream_info->num_planes; i++)
893 msm_camera_io_w(0, vfe_dev->vfe_base +
894 VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
895}
896
897static int32_t msm_vfe40_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
898{
899 int rc = 0;
900
901 switch (bpp) {
902 case 8:
903 *bpp_reg = 0;
904 break;
905 case 10:
906 *bpp_reg = 1 << 0;
907 break;
908 case 12:
909 *bpp_reg = 1 << 1;
910 break;
911 default:
912 pr_err("%s:%d invalid bpp %d", __func__, __LINE__, bpp);
913 return -EINVAL;
914 }
915 return rc;
916}
917
918static int32_t msm_vfe40_convert_io_fmt_to_reg(
919 enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
920{
921 int rc = 0;
922
923 switch (pack_format) {
924 case QCOM:
925 *pack_reg = 0x0;
926 break;
927 case MIPI:
928 *pack_reg = 0x1;
929 break;
930 case DPCM6:
931 *pack_reg = 0x2;
932 break;
933 case DPCM8:
934 *pack_reg = 0x3;
935 break;
936 case PLAIN8:
937 *pack_reg = 0x4;
938 break;
939 case PLAIN16:
940 *pack_reg = 0x5;
941 break;
942 default:
943 pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
944 return -EINVAL;
945 }
946 return rc;
947}
948
949static int32_t msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
950 enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
951{
952 int rc = 0;
953 int bpp = 0, read_bpp = 0;
954 enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
955 uint32_t bpp_reg = 0, pack_reg = 0;
956 uint32_t read_bpp_reg = 0, read_pack_reg = 0;
957 uint32_t io_format_reg = 0; /*io format register bit*/
958
959 io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
960 if ((stream_src < RDI_INTF_0) &&
961 (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
962 EXTERNAL_READ)) {
963 read_bpp = msm_isp_get_bit_per_pixel(
964 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
965 rc = msm_vfe40_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
966 if (rc < 0) {
967 pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
968 __func__, read_bpp, rc);
969 return rc;
970 }
971 read_pack_fmt = msm_isp_get_pack_format(
972 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
973 rc = msm_vfe40_convert_io_fmt_to_reg(
974 read_pack_fmt, &read_pack_reg);
975 if (rc < 0) {
976 pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
977 __func__, rc);
978 return rc;
979 }
980 /*use input format(v4l2_pix_fmt) to get pack format*/
981 io_format_reg &= 0xFFC8FFFF;
982 io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
983 }
984
985 bpp = msm_isp_get_bit_per_pixel(io_format);
986 rc = msm_vfe40_convert_bpp_to_reg(bpp, &bpp_reg);
987 if (rc < 0) {
988 pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
989 __func__, bpp, rc);
990 return rc;
991 }
992
993 switch (stream_src) {
994 case PIX_ENCODER:
995 case PIX_VIEWFINDER:
996 case CAMIF_RAW:
997 io_format_reg &= 0xFFFFCFFF;
998 io_format_reg |= bpp_reg << 12;
999 break;
1000 case IDEAL_RAW:
1001 /*use output format(v4l2_pix_fmt) to get pack format*/
1002 pack_fmt = msm_isp_get_pack_format(io_format);
1003 rc = msm_vfe40_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
1004 if (rc < 0) {
1005 pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
1006 __func__, rc);
1007 return rc;
1008 }
1009 io_format_reg &= 0xFFFFFFC8;
1010 io_format_reg |= bpp_reg << 4 | pack_reg;
1011 break;
1012 case RDI_INTF_0:
1013 case RDI_INTF_1:
1014 case RDI_INTF_2:
1015 default:
1016 pr_err("%s: Invalid stream source\n", __func__);
1017 return -EINVAL;
1018 }
1019 msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x54);
1020 return 0;
1021}
1022
1023static int msm_vfe40_start_fetch_engine(struct vfe_device *vfe_dev,
1024 void *arg)
1025{
1026 int rc = 0;
1027 uint32_t bufq_handle = 0;
1028 struct msm_isp_buffer *buf = NULL;
1029 struct msm_vfe_fetch_eng_start *fe_cfg = arg;
1030 struct msm_isp_buffer_mapped_info mapped_info;
1031
1032 if (vfe_dev->fetch_engine_info.is_busy == 1) {
1033 pr_err("%s: fetch engine busy\n", __func__);
1034 return -EINVAL;
1035 }
1036 memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
1037 /* There is other option of passing buffer address from user,
1038 *in such case, driver needs to map the buffer and use it
1039 */
1040 vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
1041 vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
1042 vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
1043 vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
1044
1045 if (!fe_cfg->offline_mode) {
1046 bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
1047 vfe_dev->buf_mgr, fe_cfg->session_id,
1048 fe_cfg->stream_id);
1049 vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
1050
1051 rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
1052 vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
1053 if (rc < 0 || !buf) {
1054 pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
1055 __func__, rc, buf);
1056 return -EINVAL;
1057 }
1058 mapped_info = buf->mapped_info[0];
1059 buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
1060 } else {
1061 rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
1062 &mapped_info, fe_cfg->fd);
1063 if (rc < 0) {
1064 pr_err("%s: can not map buffer\n", __func__);
1065 return -EINVAL;
1066 }
1067 }
1068 vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
1069 vfe_dev->fetch_engine_info.is_busy = 1;
1070
1071 msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x228);
1072 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x378);
1073
1074 msm_camera_io_w_mb(0x10000, vfe_dev->vfe_base + 0x4C);
1075 msm_camera_io_w_mb(0x20000, vfe_dev->vfe_base + 0x4C);
1076
1077 ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
1078 return 0;
1079}
1080
1081static int msm_vfe40_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
1082 void *arg)
1083{
1084 int rc = 0;
1085 uint32_t bufq_handle = 0;
1086 struct msm_isp_buffer *buf = NULL;
1087 struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
1088 struct msm_isp_buffer_mapped_info mapped_info;
1089
1090 if (vfe_dev->fetch_engine_info.is_busy == 1) {
1091 pr_err("%s: fetch engine busy\n", __func__);
1092 return -EINVAL;
1093 }
1094 memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
1095 /* There is other option of passing buffer address from user,
1096 * in such case, driver needs to map the buffer and use it
1097 */
1098 vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
1099 vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
1100 vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
1101 vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
1102
1103 if (!fe_cfg->offline_mode) {
1104 bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
1105 vfe_dev->buf_mgr, fe_cfg->session_id,
1106 fe_cfg->stream_id);
1107 vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
1108
1109 rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
1110 vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
1111 if (rc < 0 || !buf) {
1112 pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
1113 __func__, rc, buf);
1114 return -EINVAL;
1115 }
1116 mapped_info = buf->mapped_info[0];
1117 buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
1118 } else {
1119 rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
1120 &mapped_info, fe_cfg->fd);
1121 if (rc < 0) {
1122 pr_err("%s: can not map buffer\n", __func__);
1123 return -EINVAL;
1124 }
1125 }
1126 vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
1127 vfe_dev->fetch_engine_info.is_busy = 1;
1128
1129 msm_camera_io_w(mapped_info.paddr + fe_cfg->input_buf_offset,
1130 vfe_dev->vfe_base + 0x228);
1131
1132 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x378);
1133
1134 msm_camera_io_w_mb(0x10000, vfe_dev->vfe_base + 0x4C);
1135 msm_camera_io_w_mb(0x20000, vfe_dev->vfe_base + 0x4C);
1136
1137 ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
1138 return 0;
1139}
1140
1141static void msm_vfe40_cfg_fetch_engine(struct vfe_device *vfe_dev,
1142 struct msm_vfe_pix_cfg *pix_cfg)
1143{
1144 uint32_t x_size_word;
1145 uint32_t temp = 0;
1146 uint32_t main_unpack_pattern = 0;
1147 struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
1148
1149 if (pix_cfg->input_mux != EXTERNAL_READ) {
1150 pr_err("%s: Invalid mux configuration - mux: %d",
1151 __func__, pix_cfg->input_mux);
1152 return;
1153 }
1154
1155 fe_cfg = &pix_cfg->fetch_engine_cfg;
1156 pr_debug("%s: fetch_dbg wd x ht buf = %d x %d, fe = %d x %d\n",
1157 __func__, fe_cfg->buf_width, fe_cfg->buf_height,
1158 fe_cfg->fetch_width, fe_cfg->fetch_height);
1159
1160 vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
1161 VFE40_BUS_RD_CGC_OVERRIDE_BIT, 1);
1162
1163 temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
1164 temp &= 0xFFFFFFFD;
1165 temp |= (1 << 1);
1166 msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
1167
1168 msm_vfe40_config_irq(vfe_dev, (1 << 24), 0,
1169 MSM_ISP_IRQ_ENABLE);
1170
1171 msm_camera_io_w((fe_cfg->fetch_height - 1),
1172 vfe_dev->vfe_base + 0x238);
1173
1174 /* need to update to use formulae to calculate X_SIZE_WORD*/
1175 x_size_word = msm_isp_cal_word_per_line(
1176 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
1177 fe_cfg->buf_width);
1178
1179 msm_camera_io_w((x_size_word - 1) << 16, vfe_dev->vfe_base + 0x23C);
1180
1181 x_size_word = msm_isp_cal_word_per_line(
1182 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
1183 fe_cfg->fetch_width);
1184
1185 temp = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
1186 temp |= 2 << 16 | pix_cfg->pixel_pattern;
1187 msm_camera_io_w(temp, vfe_dev->vfe_base + 0x1C);
1188
1189 if (vfe_dev->vfe_hw_version == VFE40_8953_VERSION) {
1190 msm_camera_io_w(x_size_word << 17 |
1191 (fe_cfg->buf_height-1) << 4 |
1192 VFE40_FETCH_BURST_LEN,
1193 vfe_dev->vfe_base + 0x240);
1194 msm_camera_io_w(0 << 29 | 2 << 26 |
1195 (fe_cfg->buf_width - 1) << 13 |
1196 (fe_cfg->buf_height - 1),
1197 vfe_dev->vfe_base + 0x244);
1198 } else {
1199 msm_camera_io_w(x_size_word << 16 |
1200 (fe_cfg->buf_height-1) << 4 |
1201 VFE40_FETCH_BURST_LEN,
1202 vfe_dev->vfe_base + 0x240);
1203 msm_camera_io_w(0 << 28 | 2 << 25 |
1204 (fe_cfg->buf_width - 1) << 12 |
1205 (fe_cfg->buf_height - 1),
1206 vfe_dev->vfe_base + 0x244);
1207 }
1208
1209 /* need to use formulae to calculate MAIN_UNPACK_PATTERN*/
1210 switch (vfe_dev->axi_data.src_info[VFE_PIX_0].input_format) {
1211 case V4L2_PIX_FMT_P16BGGR10:
1212 case V4L2_PIX_FMT_P16GBRG10:
1213 case V4L2_PIX_FMT_P16GRBG10:
1214 case V4L2_PIX_FMT_P16RGGB10:
1215 main_unpack_pattern = 0xB210;
1216 break;
1217 default:
1218 main_unpack_pattern = 0xF6543210;
1219 break;
1220 }
1221 msm_camera_io_w(main_unpack_pattern,
1222 vfe_dev->vfe_base + 0x248);
1223 msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x264);
1224
1225}
1226
1227static void msm_vfe40_cfg_testgen(struct vfe_device *vfe_dev,
1228 struct msm_vfe_testgen_cfg *testgen_cfg)
1229{
1230 uint32_t bit_per_pixel = 0;
1231 uint32_t bpp_reg = 0;
1232 uint32_t bayer_pix_pattern_reg = 0;
1233 uint32_t unicolorbar_reg = 0;
1234 uint32_t unicolor_enb = 0;
1235
1236 bit_per_pixel = msm_isp_get_bit_per_pixel(
1237 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
1238
1239 switch (bit_per_pixel) {
1240 case 8:
1241 bpp_reg = 0x0;
1242 break;
1243 case 10:
1244 bpp_reg = 0x1;
1245 break;
1246 case 12:
1247 bpp_reg = 0x10;
1248 break;
1249 case 14:
1250 bpp_reg = 0x11;
1251 break;
1252 default:
1253 pr_err("%s: invalid bpp %d\n", __func__, bit_per_pixel);
1254 break;
1255 }
1256
1257 msm_camera_io_w(bpp_reg << 16 | testgen_cfg->burst_num_frame,
1258 vfe_dev->vfe_base + 0x940);
1259
1260 msm_camera_io_w(((testgen_cfg->lines_per_frame - 1) << 16) |
1261 (testgen_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0x944);
1262
1263 msm_camera_io_w(testgen_cfg->h_blank, vfe_dev->vfe_base + 0x958);
1264
1265 msm_camera_io_w((1 << 16) | testgen_cfg->v_blank,
1266 vfe_dev->vfe_base + 0x95C);
1267
1268 switch (testgen_cfg->pixel_bayer_pattern) {
1269 case ISP_BAYER_RGRGRG:
1270 bayer_pix_pattern_reg = 0x0;
1271 break;
1272 case ISP_BAYER_GRGRGR:
1273 bayer_pix_pattern_reg = 0x1;
1274 break;
1275 case ISP_BAYER_BGBGBG:
1276 bayer_pix_pattern_reg = 0x10;
1277 break;
1278 case ISP_BAYER_GBGBGB:
1279 bayer_pix_pattern_reg = 0x11;
1280 break;
1281 default:
1282 pr_err("%s: invalid pix pattern %d\n",
1283 __func__, bit_per_pixel);
1284 break;
1285 }
1286
1287 if (testgen_cfg->color_bar_pattern == COLOR_BAR_8_COLOR) {
1288 unicolor_enb = 0x0;
1289 } else {
1290 unicolor_enb = 0x1;
1291 switch (testgen_cfg->color_bar_pattern) {
1292 case UNICOLOR_WHITE:
1293 unicolorbar_reg = 0x0;
1294 break;
1295 case UNICOLOR_YELLOW:
1296 unicolorbar_reg = 0x1;
1297 break;
1298 case UNICOLOR_CYAN:
1299 unicolorbar_reg = 0x10;
1300 break;
1301 case UNICOLOR_GREEN:
1302 unicolorbar_reg = 0x11;
1303 break;
1304 case UNICOLOR_MAGENTA:
1305 unicolorbar_reg = 0x100;
1306 break;
1307 case UNICOLOR_RED:
1308 unicolorbar_reg = 0x101;
1309 break;
1310 case UNICOLOR_BLUE:
1311 unicolorbar_reg = 0x110;
1312 break;
1313 case UNICOLOR_BLACK:
1314 unicolorbar_reg = 0x111;
1315 break;
1316 default:
1317 pr_err("%s: invalid colorbar %d\n",
1318 __func__, testgen_cfg->color_bar_pattern);
1319 break;
1320 }
1321 }
1322 msm_camera_io_w((testgen_cfg->rotate_period << 8) |
1323 (bayer_pix_pattern_reg << 6) | (unicolor_enb << 4) |
1324 (unicolorbar_reg), vfe_dev->vfe_base + 0x968);
1325}
1326
1327static void msm_vfe40_cfg_camif(struct vfe_device *vfe_dev,
1328 struct msm_vfe_pix_cfg *pix_cfg)
1329{
1330 uint16_t first_pixel, last_pixel, first_line, last_line;
1331 uint16_t epoch_line1;
1332 struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
1333 uint32_t val, subsample_period, subsample_pattern;
1334 struct msm_vfe_camif_subsample_cfg *subsample_cfg =
1335 &pix_cfg->camif_cfg.subsample_cfg;
1336 uint16_t bus_sub_en = 0;
1337
1338 vfe_dev->dual_vfe_enable = camif_cfg->is_split;
1339
1340 msm_camera_io_w(pix_cfg->input_mux << 16 | pix_cfg->pixel_pattern,
1341 vfe_dev->vfe_base + 0x1C);
1342
1343 first_pixel = camif_cfg->first_pixel;
1344 last_pixel = camif_cfg->last_pixel;
1345 first_line = camif_cfg->first_line;
1346 last_line = camif_cfg->last_line;
1347 epoch_line1 = camif_cfg->epoch_line1;
1348
1349 if ((epoch_line1 <= 0) || (epoch_line1 > last_line))
1350 epoch_line1 = last_line - 50;
1351
1352 if ((last_line - epoch_line1) > 100)
1353 epoch_line1 = last_line - 100;
1354
1355 subsample_period = camif_cfg->subsample_cfg.irq_subsample_period;
1356 subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern;
1357
1358 msm_camera_io_w(camif_cfg->lines_per_frame << 16 |
1359 camif_cfg->pixels_per_line, vfe_dev->vfe_base + 0x300);
1360
1361 msm_camera_io_w(first_pixel << 16 | last_pixel,
1362 vfe_dev->vfe_base + 0x304);
1363
1364 msm_camera_io_w(first_line << 16 | last_line,
1365 vfe_dev->vfe_base + 0x308);
1366
1367 /* configure EPOCH0: 20 lines, and
1368 * configure EPOCH1: epoch_line1 before EOF
1369 */
1370 msm_camera_io_w_mb(0x140000 | epoch_line1,
1371 vfe_dev->vfe_base + 0x318);
1372 pr_debug("%s:%d: epoch_line1: %d\n",
1373 __func__, __LINE__, epoch_line1);
1374 if (subsample_period && subsample_pattern) {
1375 val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
1376 val &= 0xFFE0FFFF;
1377 val = (subsample_period - 1) << 16;
1378 msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
1379 ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
1380 __func__, subsample_period, subsample_pattern);
1381
1382 val = subsample_pattern;
1383 msm_camera_io_w(val, vfe_dev->vfe_base + 0x314);
1384 } else {
1385 msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x314);
1386 }
1387 val = msm_camera_io_r(vfe_dev->vfe_base + 0x2E8);
1388 val |= camif_cfg->camif_input;
1389 msm_camera_io_w(val, vfe_dev->vfe_base + 0x2E8);
1390
1391 if (subsample_cfg->pixel_skip || subsample_cfg->line_skip) {
1392 bus_sub_en = 1;
1393 val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
1394 val &= 0xFFFFFFDF;
1395 val = val | bus_sub_en << 5;
1396 msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
1397 subsample_cfg->pixel_skip &= 0x0000FFFF;
1398 subsample_cfg->line_skip &= 0x0000FFFF;
1399 msm_camera_io_w((subsample_cfg->line_skip << 16) |
1400 subsample_cfg->pixel_skip,
1401 vfe_dev->vfe_base + 0x30C);
1402 if (subsample_cfg->first_pixel ||
1403 subsample_cfg->last_pixel ||
1404 subsample_cfg->first_line ||
1405 subsample_cfg->last_line) {
1406 msm_camera_io_w(
1407 subsample_cfg->first_pixel << 16 |
1408 subsample_cfg->last_pixel,
1409 vfe_dev->vfe_base + 0x8A4);
1410 msm_camera_io_w(
1411 subsample_cfg->first_line << 16 |
1412 subsample_cfg->last_line,
1413 vfe_dev->vfe_base + 0x8A8);
1414 val = msm_camera_io_r(
1415 vfe_dev->vfe_base + 0x2F8);
1416 val |= 1 << 22;
1417 msm_camera_io_w(val,
1418 vfe_dev->vfe_base + 0x2F8);
1419 }
1420
1421 ISP_DBG("%s:camif raw op fmt %d\n",
1422 __func__, subsample_cfg->output_format);
1423 /* Pdaf output will be sent in PLAIN16 format*/
1424 val = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
1425 switch (subsample_cfg->output_format) {
1426 case CAMIF_PLAIN_8:
1427 val |= 4 << 9;
1428 break;
1429 case CAMIF_PLAIN_16:
1430 val |= 5 << 9;
1431 break;
1432 case CAMIF_MIPI_RAW:
1433 val |= 1 << 9;
1434 break;
1435 case CAMIF_QCOM_RAW:
1436 default:
1437 break;
1438 }
1439 msm_camera_io_w(val, vfe_dev->vfe_base + 0x54);
1440 }
1441}
1442
1443static void msm_vfe40_cfg_input_mux(struct vfe_device *vfe_dev,
1444 struct msm_vfe_pix_cfg *pix_cfg)
1445{
1446 uint32_t core_cfg = 0;
1447 uint32_t val = 0;
1448
1449 core_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
1450 core_cfg &= 0xFFFCFFFF;
1451
1452 switch (pix_cfg->input_mux) {
1453 case CAMIF:
1454 core_cfg |= 0x0 << 16;
1455 msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x1C);
1456 msm_vfe40_cfg_camif(vfe_dev, pix_cfg);
1457 break;
1458 case TESTGEN:
1459 /* Change CGC override */
1460 val = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
1461 val |= (1 << 31);
1462 msm_camera_io_w(val, vfe_dev->vfe_base + 0x974);
1463
1464 /* CAMIF and TESTGEN will both go thorugh CAMIF*/
1465 core_cfg |= 0x1 << 16;
1466 msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x1C);
1467 msm_vfe40_cfg_camif(vfe_dev, pix_cfg);
1468 msm_vfe40_cfg_testgen(vfe_dev, &pix_cfg->testgen_cfg);
1469 break;
1470 case EXTERNAL_READ:
1471 core_cfg |= 0x2 << 16;
1472 msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x1C);
1473 msm_vfe40_cfg_fetch_engine(vfe_dev, pix_cfg);
1474 break;
1475 default:
1476 pr_err("%s: Unsupported input mux %d\n",
1477 __func__, pix_cfg->input_mux);
1478 break;
1479 }
1480}
1481
1482static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
1483 enum msm_isp_camif_update_state update_state)
1484{
1485 uint32_t val;
1486 bool bus_en, vfe_en;
1487
1488 if (update_state == NO_UPDATE)
1489 return;
1490
1491 if (update_state == ENABLE_CAMIF) {
1492 msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x30);
1493 msm_camera_io_w_mb(0x81, vfe_dev->vfe_base + 0x34);
1494 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
1495 msm_vfe40_config_irq(vfe_dev, 0xFF, 0x81,
1496 MSM_ISP_IRQ_ENABLE);
1497
1498 bus_en =
1499 ((vfe_dev->axi_data.
1500 src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
1501 vfe_en =
1502 ((vfe_dev->axi_data.
1503 src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
1504 val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
1505 val &= 0xFFFFFF3F;
1506 val = val | bus_en << 7 | vfe_en << 6;
1507 msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
1508 /* testgen GO*/
1509 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
1510 msm_camera_io_w(1, vfe_dev->vfe_base + 0x93C);
1511 msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x2F4);
1512 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2F4);
1513 vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
1514 } else if (update_state == DISABLE_CAMIF ||
1515 update_state == DISABLE_CAMIF_IMMEDIATELY) {
1516 uint32_t poll_val;
1517
1518 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
1519 update_state = DISABLE_CAMIF;
1520 msm_vfe40_config_irq(vfe_dev, 0, 0x81,
1521 MSM_ISP_IRQ_DISABLE);
1522 val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
1523 /* disable danger signal */
1524 msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
1525 msm_camera_io_w_mb((update_state == DISABLE_CAMIF ? 0x0 : 0x6),
1526 vfe_dev->vfe_base + 0x2F4);
1527 if (readl_poll_timeout_atomic(vfe_dev->vfe_base + 0x31C,
1528 poll_val, poll_val & 0x80000000, 1000, 2000000))
1529 pr_err("%s: camif disable failed %x\n",
1530 __func__, poll_val);
1531 vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
1532 /* testgen OFF*/
1533 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
1534 msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0x93C);
1535 msm_camera_io_w(0, vfe_dev->vfe_base + 0x30);
1536 msm_camera_io_w((1 << 0), vfe_dev->vfe_base + 0x34);
1537 msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
1538 msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask,
1539 vfe_dev->irq1_mask,
1540 MSM_ISP_IRQ_SET);
1541 }
1542}
1543
1544static void msm_vfe40_cfg_rdi_reg(
1545 struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
1546 enum msm_vfe_input_src input_src)
1547{
1548 uint8_t rdi = input_src - VFE_RAW_0;
1549 uint32_t rdi_reg_cfg;
1550
1551 rdi_reg_cfg = msm_camera_io_r(
1552 vfe_dev->vfe_base + VFE40_RDI_BASE(0));
1553 rdi_reg_cfg &= ~(BIT(16 + rdi));
1554 rdi_reg_cfg |= rdi_cfg->frame_based << (16 + rdi);
1555 msm_camera_io_w(rdi_reg_cfg,
1556 vfe_dev->vfe_base + VFE40_RDI_BASE(0));
1557
1558 rdi_reg_cfg = msm_camera_io_r(
1559 vfe_dev->vfe_base + VFE40_RDI_BASE(rdi));
1560 rdi_reg_cfg &= 0x70003;
1561 rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 0x4;
1562 msm_camera_io_w(
1563 rdi_reg_cfg, vfe_dev->vfe_base + VFE40_RDI_BASE(rdi));
1564}
1565
1566static void msm_vfe40_axi_cfg_wm_reg(
1567 struct vfe_device *vfe_dev,
1568 struct msm_vfe_axi_stream *stream_info,
1569 uint8_t plane_idx)
1570{
1571 uint32_t val;
1572 uint32_t burst_len, wm_bit_shift = VFE40_WM_BIT_SHIFT_8976_VERSION;
1573 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1574 uint32_t wm_base;
1575
1576 wm_base = VFE40_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
1577
1578 if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
1579 vfe_dev->vfe_hw_version == VFE40_8939_VERSION) {
1580 burst_len = VFE40_BURST_LEN_8916_VERSION;
1581 wm_bit_shift = VFE40_WM_BIT_SHIFT;
1582 } else if (vfe_dev->vfe_hw_version == VFE40_8952_VERSION) {
1583 burst_len = VFE40_BURST_LEN_8952_VERSION;
1584 wm_bit_shift = VFE40_WM_BIT_SHIFT;
1585 } else if (vfe_dev->vfe_hw_version == VFE40_8976_VERSION ||
1586 vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
1587 vfe_dev->vfe_hw_version == VFE40_8917_VERSION ||
1588 vfe_dev->vfe_hw_version == VFE40_8953_VERSION) {
1589 burst_len = VFE40_BURST_LEN_8952_VERSION;
1590 wm_bit_shift = VFE40_WM_BIT_SHIFT_8976_VERSION;
1591 } else {
1592 burst_len = VFE40_BURST_LEN;
1593 }
1594
1595 if (!stream_info->frame_based) {
1596 msm_camera_io_w(0x0, vfe_dev->vfe_base + wm_base);
1597 /*WR_IMAGE_SIZE*/
1598 val =
1599 ((msm_isp_cal_word_per_line(
1600 stream_info->output_format,
1601 stream_info->plane_cfg[vfe_idx][plane_idx].
1602 output_width)+1)/2 - 1) << 16 |
1603 (stream_info->plane_cfg[vfe_idx][plane_idx].
1604 output_height - 1);
1605 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
1606
1607 /*WR_BUFFER_CFG*/
1608 val =
1609 msm_isp_cal_word_per_line(stream_info->output_format,
1610 stream_info->plane_cfg[vfe_idx][
1611 plane_idx].output_stride) << 16 |
1612 (stream_info->plane_cfg[vfe_idx][
1613 plane_idx].output_height - 1) << wm_bit_shift |
1614 burst_len;
1615 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
1616 } else {
1617 msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
1618 val =
1619 msm_isp_cal_word_per_line(stream_info->output_format,
1620 stream_info->plane_cfg[vfe_idx][
1621 plane_idx].output_width) << 16 |
1622 (stream_info->plane_cfg[vfe_idx][
1623 plane_idx].output_height - 1) << 4 |
1624 burst_len;
1625 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
1626 }
1627
1628 /*WR_IRQ_SUBSAMPLE_PATTERN*/
1629 msm_camera_io_w(0xFFFFFFFF,
1630 vfe_dev->vfe_base + wm_base + 0x20);
1631 /* TD: Add IRQ subsample pattern */
1632}
1633
1634static void msm_vfe40_axi_clear_wm_reg(
1635 struct vfe_device *vfe_dev,
1636 struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
1637{
1638 uint32_t val = 0;
1639 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1640 uint32_t wm_base;
1641
1642 wm_base = VFE40_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
1643 /*WR_ADDR_CFG*/
1644 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
1645 /*WR_IMAGE_SIZE*/
1646 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
1647 /*WR_BUFFER_CFG*/
1648 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
1649 /*WR_IRQ_SUBSAMPLE_PATTERN*/
1650 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
1651}
1652
1653static void msm_vfe40_axi_cfg_wm_xbar_reg(
1654 struct vfe_device *vfe_dev,
1655 struct msm_vfe_axi_stream *stream_info,
1656 uint8_t plane_idx)
1657{
1658 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1659 struct msm_vfe_axi_plane_cfg *plane_cfg;
1660 uint8_t wm;
1661 uint32_t xbar_cfg = 0;
1662 uint32_t xbar_reg_cfg = 0;
1663
1664 plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
1665 wm = stream_info->wm[vfe_idx][plane_idx];
1666
1667 switch (stream_info->stream_src) {
1668 case PIX_ENCODER:
1669 case PIX_VIEWFINDER: {
1670 if (plane_cfg->output_plane_format != CRCB_PLANE &&
1671 plane_cfg->output_plane_format != CBCR_PLANE) {
1672 /*SINGLE_STREAM_SEL*/
1673 xbar_cfg |= plane_cfg->output_plane_format << 8;
1674 } else {
1675 switch (stream_info->output_format) {
1676 case V4L2_PIX_FMT_NV12:
1677 case V4L2_PIX_FMT_NV14:
1678 case V4L2_PIX_FMT_NV16:
1679 case V4L2_PIX_FMT_NV24:
1680 xbar_cfg |= 0x3 << 4; /*PAIR_STREAM_SWAP_CTRL*/
1681 break;
1682 }
1683 xbar_cfg |= 0x1 << 1; /*PAIR_STREAM_EN*/
1684 }
1685 if (stream_info->stream_src == PIX_VIEWFINDER)
1686 xbar_cfg |= 0x1; /*VIEW_STREAM_EN*/
1687 break;
1688 }
1689 case CAMIF_RAW:
1690 xbar_cfg = 0x300;
1691 break;
1692 case IDEAL_RAW:
1693 xbar_cfg = 0x400;
1694 break;
1695 case RDI_INTF_0:
1696 xbar_cfg = 0x500;
1697 break;
1698 case RDI_INTF_1:
1699 xbar_cfg = 0x600;
1700 break;
1701 case RDI_INTF_2:
1702 xbar_cfg = 0x700;
1703 break;
1704 default:
1705 pr_err("%s: Invalid stream src\n", __func__);
1706 break;
1707 }
1708 xbar_reg_cfg =
1709 msm_camera_io_r(vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
1710 xbar_reg_cfg &= ~(0xFFFF << VFE40_XBAR_SHIFT(wm));
1711 xbar_reg_cfg |= (xbar_cfg << VFE40_XBAR_SHIFT(wm));
1712 msm_camera_io_w(xbar_reg_cfg,
1713 vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
1714}
1715
1716static void msm_vfe40_axi_clear_wm_xbar_reg(
1717 struct vfe_device *vfe_dev,
1718 struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
1719{
1720 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1721 uint8_t wm;
1722 uint32_t xbar_reg_cfg = 0;
1723
1724 wm = stream_info->wm[vfe_idx][plane_idx];
1725
1726 xbar_reg_cfg =
1727 msm_camera_io_r(vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
1728 xbar_reg_cfg &= ~(0xFFFF << VFE40_XBAR_SHIFT(wm));
1729 msm_camera_io_w(xbar_reg_cfg,
1730 vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
1731}
1732
1733static void msm_vfe40_read_wm_ping_pong_addr(
1734 struct vfe_device *vfe_dev)
1735{
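	/* Dump the WM register block (0x200 bytes from the aligned WM0 base). */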
1736 msm_camera_io_dump(vfe_dev->vfe_base +
1737 (VFE40_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
1738}
1739
1740static void msm_vfe40_update_ping_pong_addr(
1741 void __iomem *vfe_base,
1742 uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
1743 int32_t buf_size)
1744{
1745 uint32_t paddr32 = (paddr & 0xFFFFFFFF);
1746
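	/* Only the lower 32 bits of the DMA address are programmed. */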
1747 msm_camera_io_w(paddr32, vfe_base +
1748 VFE40_PING_PONG_BASE(wm_idx, pingpong_bit));
1749}
1750
1751static void msm_vfe40_set_halt_restart_mask(struct vfe_device *vfe_dev)
1752{
1753 msm_vfe40_config_irq(vfe_dev, BIT(31), BIT(8), MSM_ISP_IRQ_SET);
1754}
1755
1756static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
1757 uint32_t blocking)
1758{
1759 int rc = 0;
1760 enum msm_vfe_input_src i;
1761 struct msm_isp_timestamp ts;
1762
1763 /* Keep only halt and restart mask */
1764	msm_vfe40_config_irq(vfe_dev, BIT(31), BIT(8),
1765 MSM_ISP_IRQ_SET);
1766
1767 msm_isp_get_timestamp(&ts, vfe_dev);
1768 /* if any stream is waiting for update, signal complete */
1769 for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
1770 msm_isp_axi_stream_update(vfe_dev, i, &ts);
1771 msm_isp_axi_stream_update(vfe_dev, i, &ts);
1772 }
1773
1774 msm_isp_stats_stream_update(vfe_dev);
1775 msm_isp_stats_stream_update(vfe_dev);
1776
1777 if (blocking) {
1778 init_completion(&vfe_dev->halt_complete);
1779 /* Halt AXI Bus Bridge */
1780 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
1781 rc = wait_for_completion_interruptible_timeout(
1782 &vfe_dev->halt_complete, msecs_to_jiffies(500));
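		/*
		 * wait_for_completion_interruptible_timeout() returns 0 on
		 * timeout and a negative value if interrupted, so rc <= 0
		 * covers both failure cases.
		 */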
1783 if (rc <= 0)
1784 pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
1785 vfe_dev->pdev->id, rc);
1786 } else {
1787 /* Halt AXI Bus Bridge */
1788 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
1789 }
1790
1791 return rc;
1792}
1793
1794static void msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
1795 uint32_t blocking, uint32_t enable_camif)
1796{
1797 msm_vfe40_config_irq(vfe_dev, vfe_dev->recovery_irq0_mask,
1798 vfe_dev->recovery_irq1_mask,
1799 MSM_ISP_IRQ_ENABLE);
1800 msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
1801
1802 /* Start AXI */
1803 msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0);
1804
1805 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
1806 memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
1807 atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
1808 if (enable_camif)
1809 vfe_dev->hw_info->vfe_ops.core_ops.
1810 update_camif_state(vfe_dev, ENABLE_CAMIF);
1811}
1812
1813static uint32_t msm_vfe40_get_wm_mask(
1814 uint32_t irq_status0, uint32_t irq_status1)
1815{
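	/* AXI WM done interrupts are reported in bits [14:8] of IRQ status 0. */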
1816 return (irq_status0 >> 8) & 0x7F;
1817}
1818
1819static uint32_t msm_vfe40_get_comp_mask(
1820 uint32_t irq_status0, uint32_t irq_status1)
1821{
1822 return (irq_status0 >> 25) & 0xF;
1823}
1824
1825static uint32_t msm_vfe40_get_pingpong_status(
1826 struct vfe_device *vfe_dev)
1827{
1828 return msm_camera_io_r(vfe_dev->vfe_base + 0x268);
1829}
1830
1831static int msm_vfe40_get_stats_idx(enum msm_isp_stats_type stats_type)
1832{
1833 switch (stats_type) {
1834 case MSM_ISP_STATS_BE:
1835 return 0;
1836 case MSM_ISP_STATS_BG:
1837 return 1;
1838 case MSM_ISP_STATS_BF:
1839 return 2;
1840 case MSM_ISP_STATS_AWB:
1841 return 3;
1842 case MSM_ISP_STATS_RS:
1843 return 4;
1844 case MSM_ISP_STATS_CS:
1845 return 5;
1846 case MSM_ISP_STATS_IHIST:
1847 return 6;
1848 case MSM_ISP_STATS_BHIST:
1849 return 7;
1850 default:
1851 pr_err("%s: Invalid stats type\n", __func__);
1852 return -EINVAL;
1853 }
1854}
1855
1856static int msm_vfe40_stats_check_streams(
1857 struct msm_vfe_stats_stream *stream_info)
1858{
1859 return 0;
1860}
1861
1862static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
1863 uint32_t stats_mask, uint8_t request_comp_index, uint8_t enable)
1864{
1865 uint32_t comp_mask_reg, mask_bf_scale;
1866 atomic_t *stats_comp_mask;
1867 struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
1868
1869 if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
1870 return;
1871
1872 if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
1873		pr_err("%s: comp mask index %d exceeds max %d\n",
1874			__func__, request_comp_index,
1875			MAX_NUM_STATS_COMP_MASK);
1876 return;
1877 }
1878
1879 if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
1880 MAX_NUM_STATS_COMP_MASK) {
1881		pr_err("%s: num of comp masks %d exceeds max %d\n",
1882			__func__,
1883			vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
1884			MAX_NUM_STATS_COMP_MASK);
1885 return;
1886 }
1887
1888 stats_mask = stats_mask & 0xFF;
1889 mask_bf_scale = stats_mask;
1890
1891 stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
1892 comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
1893
1894 if (enable) {
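	/*
	 * Each composite group owns an 8-bit field of the comp mask
	 * register starting at bit 16, and signals completion through
	 * IRQ status 0 bit (29 + request_comp_index).
	 */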
1895 comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
1896 atomic_set(stats_comp_mask, stats_mask |
1897 atomic_read(stats_comp_mask));
1898 msm_vfe40_config_irq(vfe_dev,
1899 1 << (request_comp_index + 29), 0,
1900 MSM_ISP_IRQ_ENABLE);
1901 } else {
1902 if (!(atomic_read(stats_comp_mask) & stats_mask))
1903 return;
1904 atomic_set(stats_comp_mask,
1905 ~stats_mask & atomic_read(stats_comp_mask));
1906 comp_mask_reg &= ~(mask_bf_scale <<
1907 (16 + request_comp_index * 8));
1908 msm_vfe40_config_irq(vfe_dev,
1909 1 << (request_comp_index + 29), 0,
1910 MSM_ISP_IRQ_DISABLE);
1911 }
1912 msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
1913
1914	ISP_DBG("%s: comp_mask_reg: %x comp mask0: %x mask1: %x\n",
1915		__func__, comp_mask_reg,
1916		atomic_read(&stats_data->stats_comp_mask[0]),
1917		atomic_read(&stats_data->stats_comp_mask[1]));
1918
1919}
1920
1921static void msm_vfe40_stats_cfg_wm_irq_mask(
1922 struct vfe_device *vfe_dev,
1923 struct msm_vfe_stats_stream *stream_info)
1924{
1925 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
1926 stream_info);
1927
1928 msm_vfe40_config_irq(vfe_dev,
1929 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 16), 0,
1930 MSM_ISP_IRQ_ENABLE);
1931}
1932
1933static void msm_vfe40_stats_clear_wm_irq_mask(
1934 struct vfe_device *vfe_dev,
1935 struct msm_vfe_stats_stream *stream_info)
1936{
1937 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
1938 stream_info);
1939
1940 msm_vfe40_config_irq(vfe_dev,
1941 (1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 16)), 0,
1942 MSM_ISP_IRQ_DISABLE);
1943}
1944
1945static void msm_vfe40_stats_cfg_wm_reg(
1946 struct vfe_device *vfe_dev,
1947 struct msm_vfe_stats_stream *stream_info)
1948{
1949 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
1950 stream_info);
1951 int stats_idx;
1952 uint32_t stats_base;
1953
1954 stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
1955 stats_base = VFE40_STATS_BASE(stats_idx);
1956 /*WR_ADDR_CFG*/
1957 msm_camera_io_w((stream_info->framedrop_period - 1) << 2,
1958 vfe_dev->vfe_base + stats_base + 0x8);
1959 /*WR_IRQ_FRAMEDROP_PATTERN*/
1960 msm_camera_io_w(stream_info->framedrop_pattern,
1961 vfe_dev->vfe_base + stats_base + 0x10);
1962 /*WR_IRQ_SUBSAMPLE_PATTERN*/
1963 msm_camera_io_w(0xFFFFFFFF,
1964 vfe_dev->vfe_base + stats_base + 0x14);
1965}
1966
1967static void msm_vfe40_stats_clear_wm_reg(
1968 struct vfe_device *vfe_dev,
1969 struct msm_vfe_stats_stream *stream_info)
1970{
1971 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
1972 stream_info);
1973 uint32_t val = 0;
1974 int stats_idx;
1975 uint32_t stats_base;
1976
1977 stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
1978 stats_base = VFE40_STATS_BASE(stats_idx);
1979
1980 /*WR_ADDR_CFG*/
1981 msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x8);
1982 /*WR_IRQ_FRAMEDROP_PATTERN*/
1983 msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
1984 /*WR_IRQ_SUBSAMPLE_PATTERN*/
1985 msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x14);
1986}
1987
1988static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
1989{
1990 int i;
1991 uint32_t ub_offset;
1992 uint32_t stats_burst_len;
1993 uint32_t ub_size[VFE40_NUM_STATS_TYPE] = {
1994 64, /*MSM_ISP_STATS_BE*/
1995 128, /*MSM_ISP_STATS_BG*/
1996 128, /*MSM_ISP_STATS_BF*/
1997 16, /*MSM_ISP_STATS_AWB*/
1998 8, /*MSM_ISP_STATS_RS*/
1999 16, /*MSM_ISP_STATS_CS*/
2000 16, /*MSM_ISP_STATS_IHIST*/
2001 16, /*MSM_ISP_STATS_BHIST*/
2002 };
2003
2004 if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
2005 vfe_dev->vfe_hw_version == VFE40_8939_VERSION ||
2006 vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
2007 vfe_dev->vfe_hw_version == VFE40_8917_VERSION ||
2008 vfe_dev->vfe_hw_version == VFE40_8953_VERSION) {
2009 stats_burst_len = VFE40_STATS_BURST_LEN_8916_VERSION;
2010 ub_offset = VFE40_UB_SIZE_8916;
2011 } else if (vfe_dev->vfe_hw_version == VFE40_8952_VERSION ||
2012 vfe_dev->vfe_hw_version == VFE40_8976_VERSION) {
2013 stats_burst_len = VFE40_STATS_BURST_LEN_8916_VERSION;
2014 ub_offset = VFE40_UB_SIZE_8952;
2015 } else {
2016 stats_burst_len = VFE40_STATS_BURST_LEN;
2017 ub_offset = VFE40_UB_SIZE;
2018 }
2019
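	/*
	 * Carve each stats client's UB region from the top of the UB
	 * downwards: burst length in bits [31:30], region offset starting
	 * at bit 16 and (region size - 1) in the low bits.
	 */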
2020 for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
2021 ub_offset -= ub_size[i];
2022 msm_camera_io_w(stats_burst_len << 30 |
2023 ub_offset << 16 | (ub_size[i] - 1),
2024 vfe_dev->vfe_base + VFE40_STATS_BASE(i) + 0xC);
2025 }
2026}
2027
2028static void msm_vfe40_stats_update_cgc_override(struct vfe_device *vfe_dev,
2029 uint32_t stats_mask, uint8_t enable)
2030{
2031 int i;
2032 uint32_t module_cfg, cgc_mask = 0;
2033
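	/*
	 * Collect the per-module CGC override bits; the stats modules
	 * occupy bits [15:8] of the CGC override register written below.
	 */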
2034 for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
2035 if ((stats_mask >> i) & 0x1) {
2036 switch (i) {
2037 case STATS_IDX_BE:
2038 cgc_mask |= (1 << 8);
2039 break;
2040 case STATS_IDX_BF:
2041 cgc_mask |= (1 << 10);
2042 break;
2043 case STATS_IDX_BG:
2044 cgc_mask |= (1 << 9);
2045 break;
2046 case STATS_IDX_BHIST:
2047 cgc_mask |= (1 << 15);
2048 break;
2049 case STATS_IDX_AWB:
2050 cgc_mask |= (1 << 11);
2051 break;
2052 case STATS_IDX_RS:
2053 cgc_mask |= (1 << 12);
2054 break;
2055 case STATS_IDX_CS:
2056 cgc_mask |= (1 << 13);
2057 break;
2058 case STATS_IDX_IHIST:
2059 cgc_mask |= (1 << 14);
2060 break;
2061 default:
2062 pr_err("%s: Invalid stats mask\n", __func__);
2063 return;
2064 }
2065 }
2066 }
2067
2068 /* CGC override */
2069 module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
2070 if (enable)
2071 module_cfg |= cgc_mask;
2072 else
2073 module_cfg &= ~cgc_mask;
2074 msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x974);
2075}
2076
2077static bool msm_vfe40_is_module_cfg_lock_needed(
2078 uint32_t reg_offset)
2079{
2080 if (reg_offset == 0x18)
2081 return true;
2082 else
2083 return false;
2084}
2085
2086static void msm_vfe40_stats_enable_module(struct vfe_device *vfe_dev,
2087 uint32_t stats_mask, uint8_t enable)
2088{
2089 int i;
2090 uint32_t module_cfg, module_cfg_mask = 0;
2091 unsigned long flags;
2092
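	/*
	 * Map stats indices to MODULE_CFG enable bits: BE..CS use bits
	 * [10:5], IHIST uses bit 15 and BHIST uses bit 18.
	 */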
2093 for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
2094 if ((stats_mask >> i) & 0x1) {
2095 switch (i) {
2096 case 0:
2097 case 1:
2098 case 2:
2099 case 3:
2100 case 4:
2101 case 5:
2102 module_cfg_mask |= 1 << (5 + i);
2103 break;
2104 case 6:
2105 module_cfg_mask |= 1 << 15;
2106 break;
2107 case 7:
2108 module_cfg_mask |= 1 << 18;
2109 break;
2110 default:
2111 pr_err("%s: Invalid stats mask\n", __func__);
2112 return;
2113 }
2114 }
2115 }
2116
2117	/*
2118	 * On VFE40 the stats modules share the MODULE_CFG register with
2119	 * other modules, so update it under the shared data lock.
2120	 */
2121 spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
2122 module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x18);
2123 if (enable)
2124 module_cfg |= module_cfg_mask;
2125 else
2126 module_cfg &= ~module_cfg_mask;
2127 msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x18);
2128 spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
2129}
2130
2131static void msm_vfe40_stats_update_ping_pong_addr(
2132 struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
2133 uint32_t pingpong_status, dma_addr_t paddr, uint32_t buf_sz)
2134{
2135 void __iomem *vfe_base = vfe_dev->vfe_base;
2136 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
2137 stream_info);
2138 uint32_t paddr32 = (paddr & 0xFFFFFFFF);
2139 int stats_idx;
2140
2141 stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
2142 msm_camera_io_w(paddr32, vfe_base +
2143 VFE40_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
2144}
2145
2146static uint32_t msm_vfe40_stats_get_wm_mask(
2147 uint32_t irq_status0, uint32_t irq_status1)
2148{
2149 return (irq_status0 >> 16) & 0xFF;
2150}
2151
2152static uint32_t msm_vfe40_stats_get_comp_mask(
2153 uint32_t irq_status0, uint32_t irq_status1)
2154{
2155 return (irq_status0 >> 29) & 0x3;
2156}
2157
2158static uint32_t msm_vfe40_stats_get_frame_id(
2159 struct vfe_device *vfe_dev)
2160{
2161 return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
2162}
2163
2164static void msm_vfe40_get_error_mask(
2165 uint32_t *error_mask0, uint32_t *error_mask1)
2166{
2167 *error_mask0 = 0x00000000;
2168 *error_mask1 = 0x00FFFEFF;
2169}
2170
2171static void msm_vfe40_get_overflow_mask(uint32_t *overflow_mask)
2172{
2173 *overflow_mask = 0x00FFFE7E;
2174}
2175
2176static void msm_vfe40_get_rdi_wm_mask(struct vfe_device *vfe_dev,
2177 uint32_t *rdi_wm_mask)
2178{
2179 *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
2180}
2181
2182static void msm_vfe40_get_irq_mask(struct vfe_device *vfe_dev,
2183 uint32_t *irq0_mask, uint32_t *irq1_mask)
2184{
2185 *irq0_mask = vfe_dev->irq0_mask;
2186 *irq1_mask = vfe_dev->irq1_mask;
2187}
2188
2189static void msm_vfe40_get_halt_restart_mask(uint32_t *irq0_mask,
2190 uint32_t *irq1_mask)
2191{
2192 *irq0_mask = BIT(31);
2193 *irq1_mask = BIT(8);
2194}
2195
2196static struct msm_vfe_axi_hardware_info msm_vfe40_axi_hw_info = {
2197 .num_wm = 7,
2198 .num_comp_mask = 3,
2199 .num_rdi = 3,
2200 .num_rdi_master = 3,
2201 .min_wm_ub = 64,
2202 .scratch_buf_range = SZ_32M + SZ_4M,
2203};
2204
2205static struct msm_vfe_stats_hardware_info msm_vfe40_stats_hw_info = {
2206 .stats_capability_mask =
2207 1 << MSM_ISP_STATS_BE | 1 << MSM_ISP_STATS_BF |
2208 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
2209 1 << MSM_ISP_STATS_AWB | 1 << MSM_ISP_STATS_IHIST |
2210 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS,
2211 .stats_ping_pong_offset = stats_pingpong_offset_map,
2212 .num_stats_type = VFE40_NUM_STATS_TYPE,
2213 .num_stats_comp_mask = 2,
2214};
2215
2216struct msm_vfe_hardware_info vfe40_hw_info = {
2217 .num_iommu_ctx = 1,
2218 .num_iommu_secure_ctx = 1,
2219 .vfe_clk_idx = VFE40_CLK_IDX,
2220 .runtime_axi_update = 0,
2221 .min_ab = 12000000,
2222 .min_ib = 12000000,
2223 .vfe_ops = {
2224 .irq_ops = {
2225 .read_and_clear_irq_status =
2226 msm_vfe40_read_and_clear_irq_status,
2227 .read_irq_status = msm_vfe40_read_irq_status,
2228 .process_camif_irq = msm_vfe40_process_input_irq,
2229 .process_reset_irq = msm_vfe40_process_reset_irq,
2230 .process_halt_irq = msm_vfe40_process_halt_irq,
2231 .process_reg_update = msm_vfe40_process_reg_update,
2232 .process_axi_irq = msm_isp_process_axi_irq,
2233 .process_stats_irq = msm_isp_process_stats_irq,
2234 .process_epoch_irq = msm_vfe40_process_epoch_irq,
2235 .config_irq = msm_vfe40_config_irq,
2236 .preprocess_camif_irq = msm_isp47_preprocess_camif_irq,
2237 },
2238 .axi_ops = {
2239 .reload_wm = msm_vfe40_axi_reload_wm,
2240 .enable_wm = msm_vfe40_axi_enable_wm,
2241 .cfg_io_format = msm_vfe40_cfg_io_format,
2242 .cfg_comp_mask = msm_vfe40_axi_cfg_comp_mask,
2243 .clear_comp_mask = msm_vfe40_axi_clear_comp_mask,
2244 .cfg_wm_irq_mask = msm_vfe40_axi_cfg_wm_irq_mask,
2245 .clear_wm_irq_mask = msm_vfe40_axi_clear_wm_irq_mask,
2246 .cfg_framedrop = msm_vfe40_cfg_framedrop,
2247 .clear_framedrop = msm_vfe40_clear_framedrop,
2248 .cfg_wm_reg = msm_vfe40_axi_cfg_wm_reg,
2249 .clear_wm_reg = msm_vfe40_axi_clear_wm_reg,
2250 .cfg_wm_xbar_reg = msm_vfe40_axi_cfg_wm_xbar_reg,
2251 .clear_wm_xbar_reg = msm_vfe40_axi_clear_wm_xbar_reg,
2252 .cfg_ub = msm_vfe47_cfg_axi_ub,
2253 .read_wm_ping_pong_addr =
2254 msm_vfe40_read_wm_ping_pong_addr,
2255 .update_ping_pong_addr =
2256 msm_vfe40_update_ping_pong_addr,
2257 .get_comp_mask = msm_vfe40_get_comp_mask,
2258 .get_wm_mask = msm_vfe40_get_wm_mask,
2259 .get_pingpong_status = msm_vfe40_get_pingpong_status,
2260 .halt = msm_vfe40_axi_halt,
2261 .restart = msm_vfe40_axi_restart,
2262 .update_cgc_override =
2263 msm_vfe40_axi_update_cgc_override,
2264 .ub_reg_offset = msm_vfe40_ub_reg_offset,
2265 .get_ub_size = msm_vfe40_get_ub_size,
2266 },
2267 .core_ops = {
2268 .reg_update = msm_vfe40_reg_update,
2269 .cfg_input_mux = msm_vfe40_cfg_input_mux,
2270 .update_camif_state = msm_vfe40_update_camif_state,
2271 .start_fetch_eng = msm_vfe40_start_fetch_engine,
2272 .cfg_rdi_reg = msm_vfe40_cfg_rdi_reg,
2273 .reset_hw = msm_vfe40_reset_hardware,
2274 .init_hw = msm_vfe47_init_hardware,
2275 .init_hw_reg = msm_vfe40_init_hardware_reg,
2276 .clear_status_reg = msm_vfe40_clear_status_reg,
2277 .release_hw = msm_vfe47_release_hardware,
2278 .get_error_mask = msm_vfe40_get_error_mask,
2279 .get_overflow_mask = msm_vfe40_get_overflow_mask,
2280 .get_rdi_wm_mask = msm_vfe40_get_rdi_wm_mask,
2281 .get_irq_mask = msm_vfe40_get_irq_mask,
2282 .get_halt_restart_mask =
2283 msm_vfe40_get_halt_restart_mask,
2284 .process_error_status = msm_vfe40_process_error_status,
2285 .is_module_cfg_lock_needed =
2286 msm_vfe40_is_module_cfg_lock_needed,
2287 .ahb_clk_cfg = NULL,
2288 .start_fetch_eng_multi_pass =
2289 msm_vfe40_start_fetch_engine_multi_pass,
2290 .set_halt_restart_mask =
2291 msm_vfe40_set_halt_restart_mask,
2292 .set_bus_err_ign_mask = NULL,
2293 .get_bus_err_mask = NULL,
2294 },
2295 .stats_ops = {
2296 .get_stats_idx = msm_vfe40_get_stats_idx,
2297 .check_streams = msm_vfe40_stats_check_streams,
2298 .cfg_comp_mask = msm_vfe40_stats_cfg_comp_mask,
2299 .cfg_wm_irq_mask = msm_vfe40_stats_cfg_wm_irq_mask,
2300 .clear_wm_irq_mask = msm_vfe40_stats_clear_wm_irq_mask,
2301 .cfg_wm_reg = msm_vfe40_stats_cfg_wm_reg,
2302 .clear_wm_reg = msm_vfe40_stats_clear_wm_reg,
2303 .cfg_ub = msm_vfe40_stats_cfg_ub,
2304 .enable_module = msm_vfe40_stats_enable_module,
2305 .update_ping_pong_addr =
2306 msm_vfe40_stats_update_ping_pong_addr,
2307 .get_comp_mask = msm_vfe40_stats_get_comp_mask,
2308 .get_wm_mask = msm_vfe40_stats_get_wm_mask,
2309 .get_frame_id = msm_vfe40_stats_get_frame_id,
2310 .get_pingpong_status = msm_vfe40_get_pingpong_status,
2311 .update_cgc_override =
2312 msm_vfe40_stats_update_cgc_override,
2313 .enable_stats_wm = NULL,
2314 },
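		/*
		 * Platform, clock, regulator and bandwidth handling is
		 * shared with the VFE47 code (msm_vfe47_*), as are the
		 * init/release and AXI UB configuration ops above.
		 */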
2315 .platform_ops = {
2316 .get_platform_data = msm_vfe47_get_platform_data,
2317 .enable_regulators = msm_vfe47_enable_regulators,
2318 .get_regulators = msm_vfe47_get_regulators,
2319 .put_regulators = msm_vfe47_put_regulators,
2320 .enable_clks = msm_vfe47_enable_clks,
2321 .get_clks = msm_vfe47_get_clks,
2322 .put_clks = msm_vfe47_put_clks,
2323 .get_clk_rates = msm_vfe47_get_clk_rates,
2324 .get_max_clk_rate = msm_vfe47_get_max_clk_rate,
2325 .set_clk_rate = msm_vfe47_set_clk_rate,
2326 .init_bw_mgr = msm_vfe47_init_bandwidth_mgr,
2327 .deinit_bw_mgr = msm_vfe47_deinit_bandwidth_mgr,
2328 .update_bw = msm_vfe47_update_bandwidth,
2329 }
2330 },
2331 .dmi_reg_offset = 0x918,
2332 .axi_hw_info = &msm_vfe40_axi_hw_info,
2333 .stats_hw_info = &msm_vfe40_stats_hw_info,
2334 .regulator_names = {"vdd"},
2335};
2336EXPORT_SYMBOL(vfe40_hw_info);
2337
2338static const struct of_device_id msm_vfe40_dt_match[] = {
2339 {
2340 .compatible = "qcom,vfe40",
2341 .data = &vfe40_hw_info,
2342 },
2343 {}
2344};
2345
2346MODULE_DEVICE_TABLE(of, msm_vfe40_dt_match);
2347
2348static struct platform_driver vfe40_driver = {
2349 .probe = vfe_hw_probe,
2350 .driver = {
2351 .name = "msm_vfe40",
2352 .owner = THIS_MODULE,
2353 .of_match_table = msm_vfe40_dt_match,
2354 },
2355};
2356
2357static int __init msm_vfe40_init_module(void)
2358{
2359 return platform_driver_register(&vfe40_driver);
2360}
2361
2362static void __exit msm_vfe40_exit_module(void)
2363{
2364 platform_driver_unregister(&vfe40_driver);
2365}
2366
2367module_init(msm_vfe40_init_module);
2368module_exit(msm_vfe40_exit_module);
2369MODULE_DESCRIPTION("MSM VFE40 driver");
2370MODULE_LICENSE("GPL v2");
2371