/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/module.h>
14#include <linux/ratelimit.h>
15
16#include "msm.h"
17#include "msm_isp_util.h"
18#include "msm_isp_axi_util.h"
19#include "msm_isp_stats_util.h"
20#include "msm_isp.h"
21#include "msm_camera_io_util.h"
22#include "cam_hw_ops.h"
23#include "msm_isp47.h"
24#include "cam_soc_api.h"
25#include "msm_isp48.h"
26#include "linux/iopoll.h"
27
28#undef CDBG
29#define CDBG(fmt, args...) pr_debug(fmt, ##args)
30
31#define VFE47_8996V1_VERSION 0x70000000
32
33#define VFE47_BURST_LEN 3
34#define VFE47_FETCH_BURST_LEN 3
35#define VFE47_STATS_BURST_LEN 3
36#define VFE47_UB_SIZE_VFE0 2048
37#define VFE47_UB_SIZE_VFE1 1536
38#define VFE47_UB_STATS_SIZE 144
39#define MSM_ISP47_TOTAL_IMAGE_UB_VFE0 (VFE47_UB_SIZE_VFE0 - VFE47_UB_STATS_SIZE)
40#define MSM_ISP47_TOTAL_IMAGE_UB_VFE1 (VFE47_UB_SIZE_VFE1 - VFE47_UB_STATS_SIZE)
41#define VFE47_WM_BASE(idx) (0xA0 + 0x2C * idx)
42#define VFE47_RDI_BASE(idx) (0x46C + 0x4 * idx)
43#define VFE47_XBAR_BASE(idx) (0x90 + 0x4 * (idx / 2))
44#define VFE47_XBAR_SHIFT(idx) ((idx%2) ? 16 : 0)
/* add ping MAX and pong MAX */
46#define VFE47_PING_PONG_BASE(wm, ping_pong) \
47 (VFE47_WM_BASE(wm) + 0x4 * (1 + (((~ping_pong) & 0x1) * 2)))
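/*
 * Worked example for VFE47_PING_PONG_BASE() (illustration only; the
 * register-pair interpretation is inferred from how the macro is used):
 * the macro looks only at bit 0 of ping_pong. For wm 0, VFE47_WM_BASE(0)
 * is 0xA0; with bit 0 set the result is 0xA0 + 0x4 * 1 = 0xA4, and with
 * bit 0 clear it is 0xA0 + 0x4 * 3 = 0xAC, so the two buffer-address
 * registers of a write master sit 8 bytes apart.
 */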
48#define SHIFT_BF_SCALE_BIT 1
49
50#define VFE47_BUS_RD_CGC_OVERRIDE_BIT 16
51
52#define VFE47_VBIF_CLK_OFFSET 0x4
53
54static uint32_t stats_base_addr[] = {
55 0x1D4, /* HDR_BE */
56 0x254, /* BG(AWB_BG) */
57 0x214, /* BF */
58 0x1F4, /* HDR_BHIST */
59 0x294, /* RS */
60 0x2B4, /* CS */
61 0x2D4, /* IHIST */
62 0x274, /* BHIST (SKIN_BHIST) */
63 0x234, /* AEC_BG */
64};
65
66static uint8_t stats_pingpong_offset_map[] = {
67 8, /* HDR_BE */
68 12, /* BG(AWB_BG) */
69 10, /* BF */
70 9, /* HDR_BHIST */
71 14, /* RS */
72 15, /* CS */
73 16, /* IHIST */
74 13, /* BHIST (SKIN_BHIST) */
75 11, /* AEC_BG */
76};
77
78static uint8_t stats_irq_map_comp_mask[] = {
79 16, /* HDR_BE */
80 17, /* BG(AWB_BG) */
81 18, /* BF EARLY DONE/ BF */
82 19, /* HDR_BHIST */
83 20, /* RS */
84 21, /* CS */
85 22, /* IHIST */
86 23, /* BHIST (SKIN_BHIST) */
87 15, /* AEC_BG */
88};
89
90#define VFE47_STATS_BASE(idx) (stats_base_addr[idx])
91#define VFE47_STATS_PING_PONG_BASE(idx, ping_pong) \
92 (VFE47_STATS_BASE(idx) + 0x4 * \
93 (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1) * 2)
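/*
 * Sketch of the stats ping/pong addressing (same idea as the WM macro
 * above): bit stats_pingpong_offset_map[idx] of ping_pong selects the
 * buffer; when that bit is set the stats base address is used as-is, and
 * when it is clear the address 8 bytes above it (base + 0x4 * 2) is used.
 * For example, for idx 0 (HDR_BE, base 0x1D4, bit 8), a cleared bit 8
 * yields 0x1DC.
 */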
94
95#define VFE47_SRC_CLK_DTSI_IDX 5
96
97static struct msm_bus_vectors msm_isp_init_vectors[] = {
98 {
99 .src = MSM_BUS_MASTER_VFE,
100 .dst = MSM_BUS_SLAVE_EBI_CH0,
101 .ab = 0,
102 .ib = 0,
103 },
104};
105
/* During open node, request the minimum ab/ib bus bandwidth, which
 * is needed to successfully enable the bus clocks.
 */
109static struct msm_bus_vectors msm_isp_ping_vectors[] = {
110 {
111 .src = MSM_BUS_MASTER_VFE,
112 .dst = MSM_BUS_SLAVE_EBI_CH0,
113 .ab = MSM_ISP_MIN_AB,
114 .ib = MSM_ISP_MIN_IB,
115 },
116};
117
118static struct msm_bus_vectors msm_isp_pong_vectors[] = {
119 {
120 .src = MSM_BUS_MASTER_VFE,
121 .dst = MSM_BUS_SLAVE_EBI_CH0,
122 .ab = 0,
123 .ib = 0,
124 },
125};
126
127static struct msm_bus_paths msm_isp_bus_client_config[] = {
128 {
129 ARRAY_SIZE(msm_isp_init_vectors),
130 msm_isp_init_vectors,
131 },
132 {
133 ARRAY_SIZE(msm_isp_ping_vectors),
134 msm_isp_ping_vectors,
135 },
136 {
137 ARRAY_SIZE(msm_isp_pong_vectors),
138 msm_isp_pong_vectors,
139 },
140};
141
142static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
143 msm_isp_bus_client_config,
144 NULL,
145 ARRAY_SIZE(msm_isp_bus_client_config),
146 .name = "msm_camera_isp",
147 0
148};
149
150uint32_t msm_vfe47_ub_reg_offset(struct vfe_device *vfe_dev, int wm_idx)
151{
152 return (VFE47_WM_BASE(wm_idx) + 0x18);
153}
154
155uint32_t msm_vfe47_get_ub_size(struct vfe_device *vfe_dev)
156{
157 if (vfe_dev->pdev->id == ISP_VFE0)
158 return MSM_ISP47_TOTAL_IMAGE_UB_VFE0;
159 return MSM_ISP47_TOTAL_IMAGE_UB_VFE1;
160}
161
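/*
 * Minimal sketch of the IRQ register usage below, inferred from the
 * offsets rather than from a register manual: 0x5C/0x60 hold the IRQ_MASK
 * for status 0/1, 0x64/0x68 are the corresponding IRQ_CLEAR registers, and
 * writing 1 to 0x58 issues the global clear command. Enable/set operations
 * first clear any pending bits they are about to unmask; disable only
 * shrinks the cached masks before they are written back.
 */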
162void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
163 uint32_t irq0_mask, uint32_t irq1_mask,
164 enum msm_isp_irq_operation oper)
165{
166 switch (oper) {
167 case MSM_ISP_IRQ_ENABLE:
168 vfe_dev->irq0_mask |= irq0_mask;
169 vfe_dev->irq1_mask |= irq1_mask;
170 msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x64);
171 msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x68);
172 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
173 break;
174 case MSM_ISP_IRQ_DISABLE:
175 vfe_dev->irq0_mask &= ~irq0_mask;
176 vfe_dev->irq1_mask &= ~irq1_mask;
177 break;
178 case MSM_ISP_IRQ_SET:
179 vfe_dev->irq0_mask = irq0_mask;
180 vfe_dev->irq1_mask = irq1_mask;
181 msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x64);
182 msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x68);
183 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
184 break;
185 }
186 msm_camera_io_w_mb(vfe_dev->irq0_mask,
187 vfe_dev->vfe_base + 0x5C);
188 msm_camera_io_w_mb(vfe_dev->irq1_mask,
189 vfe_dev->vfe_base + 0x60);
190}
191
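/*
 * msm_vfe47_init_dt_parms() consumes three devicetree properties named by
 * the caller, e.g. for the QoS set used in msm_vfe47_init_hardware_reg():
 *
 *	qos-entries  = <2>;
 *	qos-regs     = <0x404 0x408>;
 *	qos-settings = <0xaaa9aaa9 0xaaa9aaa9>;
 *
 * (the values above are made-up examples). Each settings[i] is written to
 * dev_mem_base + regs[i]; when no "settings" property name is passed, the
 * register list is read but nothing is programmed.
 */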
192static int32_t msm_vfe47_init_dt_parms(struct vfe_device *vfe_dev,
193 struct msm_vfe_hw_init_parms *dt_parms, void __iomem *dev_mem_base)
194{
195 struct device_node *of_node;
196 int32_t i = 0, rc = 0;
197 uint32_t *dt_settings = NULL, *dt_regs = NULL, num_dt_entries = 0;
198
199 of_node = vfe_dev->pdev->dev.of_node;
200
201 rc = of_property_read_u32(of_node, dt_parms->entries,
202 &num_dt_entries);
203 if (rc < 0 || !num_dt_entries) {
204 pr_err("%s: NO QOS entries found\n", __func__);
205 return -EINVAL;
206 }
207 dt_settings = kcalloc(num_dt_entries, sizeof(uint32_t),
208 GFP_KERNEL);
209 if (!dt_settings)
210 return -ENOMEM;
211 dt_regs = kcalloc(num_dt_entries, sizeof(uint32_t),
212 GFP_KERNEL);
213 if (!dt_regs) {
214 kfree(dt_settings);
215 return -ENOMEM;
216 }
217 rc = of_property_read_u32_array(of_node, dt_parms->regs,
218 dt_regs, num_dt_entries);
219 if (rc < 0) {
220 pr_err("%s: NO QOS BUS BDG info\n", __func__);
221 kfree(dt_settings);
222 kfree(dt_regs);
223 return -EINVAL;
224 }
225 if (dt_parms->settings) {
226 rc = of_property_read_u32_array(of_node,
227 dt_parms->settings,
228 dt_settings, num_dt_entries);
229 if (rc < 0) {
230 pr_err("%s: NO QOS settings\n",
231 __func__);
232 kfree(dt_settings);
233 kfree(dt_regs);
234 } else {
235 for (i = 0; i < num_dt_entries; i++) {
236 msm_camera_io_w(dt_settings[i],
237 dev_mem_base +
238 dt_regs[i]);
239 }
240 kfree(dt_settings);
241 kfree(dt_regs);
242 }
243 } else {
244 kfree(dt_settings);
245 kfree(dt_regs);
246 }
247 return 0;
248}
249
250static enum cam_ahb_clk_vote msm_isp47_get_cam_clk_vote(
251 enum msm_vfe_ahb_clk_vote vote)
252{
253 switch (vote) {
254 case MSM_ISP_CAMERA_AHB_SVS_VOTE:
255 return CAM_AHB_SVS_VOTE;
256 case MSM_ISP_CAMERA_AHB_TURBO_VOTE:
257 return CAM_AHB_TURBO_VOTE;
258 case MSM_ISP_CAMERA_AHB_NOMINAL_VOTE:
259 return CAM_AHB_NOMINAL_VOTE;
260 case MSM_ISP_CAMERA_AHB_SUSPEND_VOTE:
261 return CAM_AHB_SUSPEND_VOTE;
262 }
263 return 0;
264}
265
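/*
 * AHB vote selection in msm_isp47_ahb_clk_cfg(): the effective vote is the
 * higher of the level the user asked for and the level implied by the VFE
 * core clock currently configured (<= svs_rate -> SVS, <= nominal_rate ->
 * NOMINAL, otherwise TURBO). This is only a summary of the comparisons
 * made below.
 */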
266int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
267 struct msm_isp_ahb_clk_cfg *ahb_cfg)
268{
269 int rc = 0;
270 enum cam_ahb_clk_vote vote;
271 enum cam_ahb_clk_vote src_clk_vote;
272 struct msm_isp_clk_rates clk_rates;
273
274 if (ahb_cfg) {
275 vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote);
276 vfe_dev->user_requested_ahb_vote = vote;
277 } else {
278 vote = vfe_dev->user_requested_ahb_vote;
279 }
280
281 vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(vfe_dev,
282 &clk_rates);
283 if (vfe_dev->vfe_clk_info[vfe_dev->hw_info->vfe_clk_idx].clk_rate <=
284 clk_rates.svs_rate)
285 src_clk_vote = CAM_AHB_SVS_VOTE;
286 else if (vfe_dev->vfe_clk_info[vfe_dev->hw_info->vfe_clk_idx].clk_rate
287 <= clk_rates.nominal_rate)
288 src_clk_vote = CAM_AHB_NOMINAL_VOTE;
289 else
290 src_clk_vote = CAM_AHB_TURBO_VOTE;
291
	/*
	 * vote for the higher of the user-requested vote and the vote
	 * matched to the src clock rate
	 */
293 if (vote < src_clk_vote)
294 vote = src_clk_vote;
295
296 if (vote && vfe_dev->ahb_vote != vote) {
297 rc = cam_config_ahb_clk(NULL, 0,
298 (vfe_dev->pdev->id == ISP_VFE0 ?
299 CAM_AHB_CLIENT_VFE0 : CAM_AHB_CLIENT_VFE1), vote);
300 if (rc)
301 pr_err("%s: failed to set ahb vote to %x\n",
302 __func__, vote);
303 else
304 vfe_dev->ahb_vote = vote;
305 }
306 return rc;
307}
308
309int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
310{
311 int rc = -1;
312 enum cam_ahb_clk_client id;
313
	if (vfe_used_by_adsp(vfe_dev))
		return msecs_to_jiffies(50);

	if (vfe_dev->pdev->id == 0)
318 id = CAM_AHB_CLIENT_VFE0;
319 else
320 id = CAM_AHB_CLIENT_VFE1;
321
322 rc = vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(
323 vfe_dev, 1);
324 if (rc)
325 goto enable_regulators_failed;
326
327 rc = vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
328 vfe_dev, 1);
329 if (rc)
330 goto clk_enable_failed;
331
332 vfe_dev->user_requested_ahb_vote = CAM_AHB_SVS_VOTE;
333 rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
334 if (rc < 0) {
335 pr_err("%s: failed to vote for AHB\n", __func__);
336 goto ahb_vote_fail;
337 }
338 vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
339
340 vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
341 vfe_dev->vfe_base;
342
343 rc = msm_camera_enable_irq(vfe_dev->vfe_irq, 1);
344 if (rc < 0)
345 goto irq_enable_fail;
346
347 return rc;
348irq_enable_fail:
349 vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
350 if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
351 pr_err("%s: failed to remove vote for AHB\n", __func__);
352 vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
353ahb_vote_fail:
354 vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
355clk_enable_failed:
356 vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
357enable_regulators_failed:
358 return rc;
359}
360
361void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
362{
363 enum cam_ahb_clk_client id;
364 unsigned long rate = 0;
365
366 /* when closing node, disable all irq */
367 vfe_dev->irq0_mask = 0;
368 vfe_dev->irq1_mask = 0;
369 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
370 vfe_dev->irq0_mask, vfe_dev->irq1_mask,
371 MSM_ISP_IRQ_SET);
372 msm_camera_enable_irq(vfe_dev->vfe_irq, 0);
373 tasklet_kill(&(vfe_dev->common_data->tasklets[vfe_dev->pdev->id].
374 tasklet));
375 msm_isp_flush_tasklet(vfe_dev);
376
377 vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
378
379 msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id, 0, 0);
380
381 if (vfe_dev->pdev->id == 0)
382 id = CAM_AHB_CLIENT_VFE0;
383 else
384 id = CAM_AHB_CLIENT_VFE1;
385
386 vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev, &rate);
387
388 if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
389 pr_err("%s: failed to vote for AHB\n", __func__);
390
391 vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
392
393 vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
394 vfe_dev, 0);
395 msm_vfe47_configure_hvx(vfe_dev, 0);
396 vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
397}
398
399void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
400{
401 struct msm_vfe_hw_init_parms qos_parms;
402 struct msm_vfe_hw_init_parms vbif_parms;
403 struct msm_vfe_hw_init_parms ds_parms;
404
405 memset(&qos_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
406 memset(&vbif_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
407 memset(&ds_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
408
409 qos_parms.entries = "qos-entries";
410 qos_parms.regs = "qos-regs";
411 qos_parms.settings = "qos-settings";
412 vbif_parms.entries = "vbif-entries";
413 vbif_parms.regs = "vbif-regs";
414 vbif_parms.settings = "vbif-settings";
415 ds_parms.entries = "ds-entries";
416 ds_parms.regs = "ds-regs";
417 ds_parms.settings = "ds-settings";
418
419 msm_vfe47_init_dt_parms(vfe_dev, &qos_parms, vfe_dev->vfe_base);
420 msm_vfe47_init_dt_parms(vfe_dev, &ds_parms, vfe_dev->vfe_base);
421 msm_vfe47_init_dt_parms(vfe_dev, &vbif_parms, vfe_dev->vfe_vbif_base);
422
423
424 /* BUS_CFG */
425 msm_camera_io_w(0x00000101, vfe_dev->vfe_base + 0x84);
426 /* IRQ_MASK/CLEAR */
427 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
428 0x810000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE);
429}
430
431void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
432{
433 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
434 0x80000000, 0x0, MSM_ISP_IRQ_SET);
435 msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
436 msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
437 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
438}
439
440void msm_vfe47_process_reset_irq(struct vfe_device *vfe_dev,
441 uint32_t irq_status0, uint32_t irq_status1)
442{
443 unsigned long flags;
444
	if (irq_status0 & (1 << 31)) {
		spin_lock_irqsave(&vfe_dev->reset_completion_lock, flags);
		complete(&vfe_dev->reset_complete);
		vfe_dev->reset_pending = 0;
		spin_unlock_irqrestore(&vfe_dev->reset_completion_lock, flags);
	}
}
452
453void msm_vfe47_process_halt_irq(struct vfe_device *vfe_dev,
454 uint32_t irq_status0, uint32_t irq_status1)
455{
456 uint32_t val = 0;
	unsigned long flags;

	if (irq_status1 & (1 << 8)) {
		spin_lock_irqsave(&vfe_dev->halt_completion_lock, flags);
		complete(&vfe_dev->halt_complete);
		spin_unlock_irqrestore(&vfe_dev->halt_completion_lock, flags);
		msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400);
	}
465
466 val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
467 val &= ~(0x1);
468 msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
469}
470
471void msm_vfe47_process_input_irq(struct vfe_device *vfe_dev,
472 uint32_t irq_status0, uint32_t irq_status1,
473 struct msm_isp_timestamp *ts)
474{
475 if (!(irq_status0 & 0x1000003))
476 return;
477
478 if (irq_status0 & (1 << 0)) {
479 ISP_DBG("%s: SOF IRQ\n", __func__);
480 msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
481 }
482
483 if (irq_status0 & (1 << 24)) {
484 ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
485 msm_isp_fetch_engine_done_notify(vfe_dev,
486 &vfe_dev->fetch_engine_info);
487 }
488
489
490 if (irq_status0 & (1 << 1))
491 ISP_DBG("%s: EOF IRQ\n", __func__);
492}
493
494void msm_vfe47_process_violation_status(
495 struct vfe_device *vfe_dev)
496{
497 uint32_t violation_status = vfe_dev->error_info.violation_status;
498
499 if (violation_status > 39) {
500 pr_err("%s: invalid violation status %d\n",
501 __func__, violation_status);
502 return;
503 }
504
505 pr_err_ratelimited("%s: VFE pipeline violation status %d\n", __func__,
506 violation_status);
507}
508
509void msm_vfe47_process_error_status(struct vfe_device *vfe_dev)
510{
511 uint32_t error_status1 = vfe_dev->error_info.error_mask1;
512
513 if (error_status1 & (1 << 0)) {
514 pr_err("%s: camif error status: 0x%x\n",
515 __func__, vfe_dev->error_info.camif_status);
516 /* dump camif registers on camif error */
517 msm_camera_io_dump(vfe_dev->vfe_base + 0x478, 0x3C, 1);
518 /* testgen */
519 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
520 msm_camera_io_dump(vfe_dev->vfe_base + 0xC58, 0x28, 1);
521 }
522 if (error_status1 & (1 << 1))
523 pr_err("%s: stats bhist overwrite\n", __func__);
524 if (error_status1 & (1 << 2))
525 pr_err("%s: stats cs overwrite\n", __func__);
526 if (error_status1 & (1 << 3))
527 pr_err("%s: stats ihist overwrite\n", __func__);
528 if (error_status1 & (1 << 4))
529 pr_err("%s: realign buf y overflow\n", __func__);
530 if (error_status1 & (1 << 5))
531 pr_err("%s: realign buf cb overflow\n", __func__);
532 if (error_status1 & (1 << 6))
533 pr_err("%s: realign buf cr overflow\n", __func__);
534 if (error_status1 & (1 << 7))
535 msm_vfe47_process_violation_status(vfe_dev);
536 if (error_status1 & (1 << 9))
537 pr_err("%s: image master 0 bus overflow\n", __func__);
538 if (error_status1 & (1 << 10))
539 pr_err("%s: image master 1 bus overflow\n", __func__);
540 if (error_status1 & (1 << 11))
541 pr_err("%s: image master 2 bus overflow\n", __func__);
542 if (error_status1 & (1 << 12))
543 pr_err("%s: image master 3 bus overflow\n", __func__);
544 if (error_status1 & (1 << 13))
545 pr_err("%s: image master 4 bus overflow\n", __func__);
546 if (error_status1 & (1 << 14))
547 pr_err("%s: image master 5 bus overflow\n", __func__);
548 if (error_status1 & (1 << 15))
549 pr_err("%s: image master 6 bus overflow\n", __func__);
550 if (error_status1 & (1 << 16))
551 pr_err("%s: status hdr be bus overflow\n", __func__);
552 if (error_status1 & (1 << 17))
553 pr_err("%s: status bg bus overflow\n", __func__);
554 if (error_status1 & (1 << 18))
555 pr_err("%s: status bf bus overflow\n", __func__);
556 if (error_status1 & (1 << 19))
557 pr_err("%s: status hdr bhist bus overflow\n", __func__);
558 if (error_status1 & (1 << 20))
559 pr_err("%s: status rs bus overflow\n", __func__);
560 if (error_status1 & (1 << 21))
561 pr_err("%s: status cs bus overflow\n", __func__);
562 if (error_status1 & (1 << 22))
563 pr_err("%s: status ihist bus overflow\n", __func__);
564 if (error_status1 & (1 << 23))
565 pr_err("%s: status skin bhist bus overflow\n", __func__);
566 if (error_status1 & (1 << 24))
567 pr_err("%s: status aec bg bus overflow\n", __func__);
568 if (error_status1 & (1 << 25))
569 pr_err("%s: status dsp error\n", __func__);
570}
571
572void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev,
573 uint32_t *irq_status0, uint32_t *irq_status1)
574{
	uint32_t count = 0;

	*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
577 *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
578 /* Mask off bits that are not enabled */
579 msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
580 msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x68);
581 msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
582 *irq_status0 &= vfe_dev->irq0_mask;
583 *irq_status1 &= vfe_dev->irq1_mask;
	/* check if the status register is cleared; if not, clear it again */
	while (*irq_status0 &&
		(*irq_status0 & msm_camera_io_r(vfe_dev->vfe_base + 0x6C)) &&
		(count < MAX_RECOVERY_THRESHOLD)) {
		msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
		msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
		count++;
	}

593 if (*irq_status1 & (1 << 0)) {
594 vfe_dev->error_info.camif_status =
595 msm_camera_io_r(vfe_dev->vfe_base + 0x4A4);
		/* mask off camif error after first occurrence */
597 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0,
598 (1 << 0), MSM_ISP_IRQ_DISABLE);
599 }
600
601 if (*irq_status1 & (1 << 7))
602 vfe_dev->error_info.violation_status =
603 msm_camera_io_r(vfe_dev->vfe_base + 0x7C);
604
605}
606
607void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
608 uint32_t *irq_status0, uint32_t *irq_status1)
609{
610 *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
611 *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
612}
613
614void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
615 uint32_t irq_status0, uint32_t irq_status1,
616 struct msm_isp_timestamp *ts)
617{
618 enum msm_vfe_input_src i;
619 uint32_t shift_irq;
620 uint8_t reg_updated = 0;
621 unsigned long flags;
622
623 if (!(irq_status0 & 0xF0))
624 return;
625 /* Shift status bits so that PIX SOF is 1st bit */
626 shift_irq = ((irq_status0 & 0xF0) >> 4);
627
628 for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
629 if (shift_irq & BIT(i)) {
630 reg_updated |= BIT(i);
631 ISP_DBG("%s REG_UPDATE IRQ %x vfe %d\n", __func__,
632 (uint32_t)BIT(i), vfe_dev->pdev->id);
633 switch (i) {
634 case VFE_PIX_0:
635 msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
636 VFE_PIX_0, ts);
637 msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
638 MSM_ISP_COMP_IRQ_REG_UPD);
639 msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
640 MSM_ISP_COMP_IRQ_REG_UPD, ts);
641 /*
642 * if only camif raw streams active then force
643 * reg update
644 */
645 if (vfe_dev->axi_data.src_info[VFE_PIX_0].
646 raw_stream_count > 0 &&
647 vfe_dev->axi_data.src_info[VFE_PIX_0].
648 stream_count == 0)
649 vfe_dev->hw_info->vfe_ops.core_ops.
650 reg_update(vfe_dev, i);
651 break;
652 case VFE_RAW_0:
653 case VFE_RAW_1:
654 case VFE_RAW_2:
655 msm_isp_increment_frame_id(vfe_dev, i, ts);
656 msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
657 msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
658 MSM_ISP_COMP_IRQ_REG_UPD, ts);
659 /*
660 * Reg Update is pseudo SOF for RDI,
661 * so request every frame
662 */
663 vfe_dev->hw_info->vfe_ops.core_ops.
664 reg_update(vfe_dev, i);
665 /* reg upd is also epoch for RDI */
666 msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
667 MSM_ISP_COMP_IRQ_EPOCH, ts);
668 break;
669 default:
670 pr_err("%s: Error case\n", __func__);
671 return;
672 }
673 }
674 }
675
676 spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
677 if (reg_updated & BIT(VFE_PIX_0))
678 vfe_dev->reg_updated = 1;
679
680 vfe_dev->reg_update_requested &= ~reg_updated;
681 spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
682}
683
684void msm_vfe47_process_epoch_irq(struct vfe_device *vfe_dev,
685 uint32_t irq_status0, uint32_t irq_status1,
686 struct msm_isp_timestamp *ts)
687{
688 if (!(irq_status0 & 0xc))
689 return;
690
691 if (irq_status0 & BIT(2)) {
692 ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
693 msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
694 MSM_ISP_COMP_IRQ_EPOCH, ts);
695 msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
696 MSM_ISP_COMP_IRQ_EPOCH);
697 msm_isp_update_error_frame_count(vfe_dev);
698 msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
699 if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
700 && vfe_dev->axi_data.src_info[VFE_PIX_0].
701 stream_count == 0) {
702 msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
703 MSM_ISP_COMP_IRQ_REG_UPD, ts);
704 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
705 vfe_dev, VFE_PIX_0);
706 }
707 }
708}
709
710void msm_isp47_preprocess_camif_irq(struct vfe_device *vfe_dev,
711 uint32_t irq_status0)
712{
713 if (irq_status0 & BIT(3))
714 vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame = false;
715 if (irq_status0 & BIT(0))
716 vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame = true;
717}
718
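/*
 * Note on the dual-VFE handling below: in a split (dual-ISP) session the
 * PIX reg-update is normally issued only from the VFE1 context and is
 * written to both VFE register bases back to back so the two cores latch
 * their shadow registers together; the stand-alone write path is used for
 * non-split sessions, for RDI sources, and for PIX when no PIX streams are
 * active.
 */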
719void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
720 enum msm_vfe_input_src frame_src)
721{
722 uint32_t update_mask = 0;
723 unsigned long flags;
724
	/* This HW supports up to VFE_RAW_2 */
726 if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
727 pr_err("%s Error case\n", __func__);
728 return;
729 }
730
731 /*
732 * If frame_src == VFE_SRC_MAX request reg_update on all
733 * supported INTF
734 */
735 if (frame_src == VFE_SRC_MAX)
736 update_mask = 0xF;
737 else
738 update_mask = BIT((uint32_t)frame_src);
739 ISP_DBG("%s update_mask %x\n", __func__, update_mask);
740
741 spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
742 vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
743 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
744 vfe_dev->reg_update_requested |= update_mask;
745 vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
746 vfe_dev->reg_update_requested;
747 if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
748 ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
749 if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) {
750 pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__);
751 spin_unlock_irqrestore(&vfe_dev->reg_update_lock,
752 flags);
753 return;
754 }
755 msm_camera_io_w_mb(update_mask,
756 vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
757 + 0x4AC);
758 msm_camera_io_w_mb(update_mask,
759 vfe_dev->vfe_base + 0x4AC);
760 } else if (!vfe_dev->is_split ||
761 ((frame_src == VFE_PIX_0) &&
762 (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
763 (vfe_dev->axi_data.src_info[VFE_PIX_0].
764 raw_stream_count == 0)) ||
765 (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
766 msm_camera_io_w_mb(update_mask,
767 vfe_dev->vfe_base + 0x4AC);
768 }
769 spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
770}
771
772long msm_vfe47_reset_hardware(struct vfe_device *vfe_dev,
773 uint32_t first_start, uint32_t blocking_call)
774{
775 long rc = 0;
776 uint32_t reset;
777 unsigned long flags;
778
	spin_lock_irqsave(&vfe_dev->reset_completion_lock, flags);
	init_completion(&vfe_dev->reset_complete);
	spin_unlock_irqrestore(&vfe_dev->reset_completion_lock, flags);

783 if (blocking_call)
784 vfe_dev->reset_pending = 1;
785
786 if (first_start) {
787 if (msm_vfe_is_vfe48(vfe_dev))
788 reset = 0x3F7;
789 else
790 reset = 0x3FF;
791 msm_camera_io_w_mb(reset, vfe_dev->vfe_base + 0x18);
792 } else {
793 if (msm_vfe_is_vfe48(vfe_dev))
794 reset = 0x3E7;
795 else
796 reset = 0x3EF;
797 msm_camera_io_w_mb(reset, vfe_dev->vfe_base + 0x18);
798 msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
799 msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
800 msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
801 vfe_dev->hw_info->vfe_ops.axi_ops.
802 reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0011FFFF);
803 }
804
805 if (blocking_call) {
806 rc = wait_for_completion_interruptible_timeout(
807 &vfe_dev->reset_complete, msecs_to_jiffies(100));
808 if (rc <= 0) {
809 pr_err("%s:%d failed: reset timeout\n", __func__,
810 __LINE__);
811 vfe_dev->reset_pending = 0;
812 }
813 }
814
815 return rc;
816}
817
818void msm_vfe47_axi_reload_wm(struct vfe_device *vfe_dev,
819 void __iomem *vfe_base, uint32_t reload_mask)
820{
821 msm_camera_io_w_mb(reload_mask, vfe_base + 0x80);
822}
823
824void msm_vfe47_axi_update_cgc_override(struct vfe_device *vfe_dev,
825 uint8_t wm_idx, uint8_t enable)
826{
827 uint32_t val;
828
829 /* Change CGC override */
830 val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
831 if (enable)
832 val |= (1 << wm_idx);
833 else
834 val &= ~(1 << wm_idx);
835 msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x3C);
836}
837
838static void msm_vfe47_axi_enable_wm(void __iomem *vfe_base,
839 uint8_t wm_idx, uint8_t enable)
840{
841 uint32_t val;
842
843 val = msm_camera_io_r(vfe_base + VFE47_WM_BASE(wm_idx));
844 if (enable)
845 val |= 0x1;
846 else
847 val &= ~0x1;
848 msm_camera_io_w_mb(val,
849 vfe_base + VFE47_WM_BASE(wm_idx));
850}
851
852void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
853 struct msm_vfe_axi_stream *stream_info)
854{
855 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
856 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
857 uint32_t comp_mask, comp_mask_index;
858 int i;
859 uint32_t overflow_mask = 0;
860
861 comp_mask_index = stream_info->comp_mask_index[vfe_idx];
862 comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
863 comp_mask &= ~(0x7F << (comp_mask_index * 8));
864 comp_mask |= (axi_data->composite_info[comp_mask_index].
865 stream_composite_mask << (comp_mask_index * 8));
866 msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
867
868 for (i = 0; i < stream_info->num_planes; i++)
869 overflow_mask |= (1 << (stream_info->wm[vfe_idx][i] + 9));
870
871 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
872 1 << (comp_mask_index + 25), overflow_mask,
873 MSM_ISP_IRQ_ENABLE);
874}
875
876void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
877 struct msm_vfe_axi_stream *stream_info)
878{
879 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
880 uint32_t comp_mask, comp_mask_index;
881
882 comp_mask_index = stream_info->comp_mask_index[vfe_idx];
883 comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
884 comp_mask &= ~(0x7F << (comp_mask_index * 8));
885 msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
886
887 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
888 (1 << (comp_mask_index + 25)), 0,
889 MSM_ISP_IRQ_DISABLE);
890}
891
892void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
893 struct msm_vfe_axi_stream *stream_info)
894{
895 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
896
897 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
898 1 << (stream_info->wm[vfe_idx][0] + 8),
899 1 << (stream_info->wm[vfe_idx][0] + 9),
900 MSM_ISP_IRQ_ENABLE);
901}
902
903void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
904 struct msm_vfe_axi_stream *stream_info)
905{
906 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
907
908 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
909 (1 << (stream_info->wm[vfe_idx][0] + 8)),
910 0, MSM_ISP_IRQ_DISABLE);
911}
912
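/*
 * Framedrop programming sketch (register names are inferred from the
 * offsets): for each plane's write master the 32-bit framedrop_pattern is
 * written to WM_BASE + 0x24 and (framedrop_period - 1) is placed in bits
 * [6:2] of WM_BASE + 0x14. E.g. pattern 0x1 with period 4 keeps one frame
 * out of every four, assuming the pattern is rotated once per frame.
 */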
913void msm_vfe47_cfg_framedrop(struct vfe_device *vfe_dev,
914 struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
915 uint32_t framedrop_period)
916{
917 void __iomem *vfe_base = vfe_dev->vfe_base;
918 uint32_t i, temp;
919 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
920
921 for (i = 0; i < stream_info->num_planes; i++) {
922 msm_camera_io_w(framedrop_pattern, vfe_base +
923 VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x24);
924 temp = msm_camera_io_r(vfe_base +
925 VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x14);
926 temp &= 0xFFFFFF83;
927 msm_camera_io_w(temp | (framedrop_period - 1) << 2,
928 vfe_base + VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x14);
929 }
930}
931
932void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
933 struct msm_vfe_axi_stream *stream_info)
934{
935 uint32_t i;
936 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
937
938 for (i = 0; i < stream_info->num_planes; i++)
939 msm_camera_io_w(0, vfe_dev->vfe_base +
940 VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x24);
941}
942
943static int32_t msm_vfe47_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
944{
945 int rc = 0;
946
947 switch (bpp) {
948 case 8:
949 *bpp_reg = 0;
950 break;
951 case 10:
952 *bpp_reg = 1;
953 break;
954 case 12:
955 *bpp_reg = 2;
956 break;
957 case 14:
958 *bpp_reg = 3;
959 break;
960 default:
961 pr_err("%s:%d invalid bpp %d", __func__, __LINE__, bpp);
962 return -EINVAL;
963 }
964
965 return rc;
966}
967
968static int32_t msm_vfe47_convert_io_fmt_to_reg(
969 enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
970{
971 int rc = 0;
972
973 switch (pack_format) {
974 case QCOM:
975 *pack_reg = 0x0;
976 break;
977 case MIPI:
978 *pack_reg = 0x1;
979 break;
980 case DPCM6:
981 *pack_reg = 0x2;
982 break;
983 case DPCM8:
984 *pack_reg = 0x3;
985 break;
986 case PLAIN8:
987 *pack_reg = 0x4;
988 break;
989 case PLAIN16:
990 *pack_reg = 0x5;
991 break;
992 case DPCM10:
993 *pack_reg = 0x6;
994 break;
995 default:
996 pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
997 return -EINVAL;
998 }
999
1000 return rc;
1001}
1002
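/*
 * Rough layout of the IO_FORMAT register at 0x88 as used below (field
 * positions inferred from the masks, not from documentation): bits [13:12]
 * carry the bpp for the pixel/CAMIF-raw output path, bits [5:4] the bpp
 * and [2:0] the pack format for IDEAL_RAW, and bits [21:20]/[18:16] the
 * bpp and pack format of the fetch-engine (EXTERNAL_READ) input path.
 */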
1003int32_t msm_vfe47_cfg_io_format(struct vfe_device *vfe_dev,
1004 enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
1005{
1006 int rc = 0;
1007 int bpp = 0, read_bpp = 0;
1008 enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
1009 uint32_t bpp_reg = 0, pack_reg = 0;
1010 uint32_t read_bpp_reg = 0, read_pack_reg = 0;
1011 uint32_t io_format_reg = 0; /*io format register bit*/
1012
1013 io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x88);
1014
1015 /*input config*/
1016 if ((stream_src < RDI_INTF_0) &&
1017 (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
1018 EXTERNAL_READ)) {
1019 read_bpp = msm_isp_get_bit_per_pixel(
1020 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
1021 rc = msm_vfe47_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
1022 if (rc < 0) {
1023 pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
1024 __func__, read_bpp, rc);
1025 return rc;
1026 }
1027
1028 read_pack_fmt = msm_isp_get_pack_format(
1029 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
1030 rc = msm_vfe47_convert_io_fmt_to_reg(
1031 read_pack_fmt, &read_pack_reg);
1032 if (rc < 0) {
1033 pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
1034 __func__, rc);
1035 return rc;
1036 }
1037 /*use input format(v4l2_pix_fmt) to get pack format*/
1038 io_format_reg &= 0xFFC8FFFF;
1039 io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
1040 }
1041
1042 bpp = msm_isp_get_bit_per_pixel(io_format);
1043 rc = msm_vfe47_convert_bpp_to_reg(bpp, &bpp_reg);
1044 if (rc < 0) {
1045 pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
1046 __func__, bpp, rc);
1047 return rc;
1048 }
1049
1050 switch (stream_src) {
1051 case PIX_VIDEO:
1052 case PIX_ENCODER:
1053 case PIX_VIEWFINDER:
1054 case CAMIF_RAW:
1055 io_format_reg &= 0xFFFFCFFF;
1056 io_format_reg |= bpp_reg << 12;
1057 break;
1058 case IDEAL_RAW:
1059 /*use output format(v4l2_pix_fmt) to get pack format*/
1060 pack_fmt = msm_isp_get_pack_format(io_format);
1061 rc = msm_vfe47_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
1062 if (rc < 0) {
1063 pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
1064 __func__, rc);
1065 return rc;
1066 }
1067 io_format_reg &= 0xFFFFFFC8;
1068 io_format_reg |= bpp_reg << 4 | pack_reg;
1069 break;
1070 case RDI_INTF_0:
1071 case RDI_INTF_1:
1072 case RDI_INTF_2:
1073 default:
1074 pr_err("%s: Invalid stream source\n", __func__);
1075 return -EINVAL;
1076 }
1077 msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x88);
1078 return 0;
1079}
1080
1081int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
1082 void *arg)
1083{
1084 int rc = 0;
1085 uint32_t bufq_handle = 0;
1086 struct msm_isp_buffer *buf = NULL;
1087 struct msm_vfe_fetch_eng_start *fe_cfg = arg;
1088 struct msm_isp_buffer_mapped_info mapped_info;
1089
1090 if (vfe_dev->fetch_engine_info.is_busy == 1) {
1091 pr_err("%s: fetch engine busy\n", __func__);
1092 return -EINVAL;
1093 }
1094
1095 memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
1096
	/* There is another option of passing the buffer address from
	 * userspace; in that case the driver needs to map the buffer
	 * and use it.
	 */
1100 vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
1101 vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
1102 vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
1103 vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
1104
1105 if (!fe_cfg->offline_mode) {
1106 bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
1107 vfe_dev->buf_mgr, fe_cfg->session_id,
1108 fe_cfg->stream_id);
1109 vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
1110
		mutex_lock(&vfe_dev->buf_mgr->lock);
		rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
			vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
		if (rc < 0 || !buf) {
			pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
				__func__, rc, buf);
			mutex_unlock(&vfe_dev->buf_mgr->lock);
			return -EINVAL;
		}
		mapped_info = buf->mapped_info[0];
		buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
		mutex_unlock(&vfe_dev->buf_mgr->lock);
	} else {
1124 rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
1125 &mapped_info, fe_cfg->fd);
1126 if (rc < 0) {
1127 pr_err("%s: can not map buffer\n", __func__);
1128 return -EINVAL;
1129 }
1130 }
1131
1132 vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
1133 vfe_dev->fetch_engine_info.is_busy = 1;
1134
1135 msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x2F4);
1136
1137 msm_camera_io_w_mb(0x100000, vfe_dev->vfe_base + 0x80);
1138 msm_camera_io_w_mb(0x200000, vfe_dev->vfe_base + 0x80);
1139
1140 ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
1141
1142 return 0;
1143}
1144
1145int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
1146 void *arg)
1147{
1148 int rc = 0;
1149 uint32_t bufq_handle = 0;
1150 struct msm_isp_buffer *buf = NULL;
1151 struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
1152 struct msm_isp_buffer_mapped_info mapped_info;
1153
1154 if (vfe_dev->fetch_engine_info.is_busy == 1) {
1155 pr_err("%s: fetch engine busy\n", __func__);
1156 return -EINVAL;
1157 }
1158
1159 memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
1160
1161 vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
1162 vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
1163 vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
1164 vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
1165
1166 if (!fe_cfg->offline_mode) {
1167 bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
1168 vfe_dev->buf_mgr, fe_cfg->session_id,
1169 fe_cfg->stream_id);
1170 vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
1171
		mutex_lock(&vfe_dev->buf_mgr->lock);
		rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
			vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
		if (rc < 0 || !buf) {
			pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
				__func__, rc, buf);
			mutex_unlock(&vfe_dev->buf_mgr->lock);
			return -EINVAL;
		}
		mapped_info = buf->mapped_info[0];
		buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
		mutex_unlock(&vfe_dev->buf_mgr->lock);
	} else {
1185 rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
1186 &mapped_info, fe_cfg->fd);
1187 if (rc < 0) {
1188 pr_err("%s: can not map buffer\n", __func__);
1189 return -EINVAL;
1190 }
1191 }
1192
1193 vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
1194 vfe_dev->fetch_engine_info.is_busy = 1;
1195
1196 msm_camera_io_w(mapped_info.paddr + fe_cfg->input_buf_offset,
1197 vfe_dev->vfe_base + 0x2F4);
1198 msm_camera_io_w_mb(0x100000, vfe_dev->vfe_base + 0x80);
1199 msm_camera_io_w_mb(0x200000, vfe_dev->vfe_base + 0x80);
1200
1201 ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
1202
1203 return 0;
1204}
1205
1206void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
1207 struct msm_vfe_pix_cfg *pix_cfg)
1208{
1209 uint32_t x_size_word, temp;
1210 struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
1211 uint32_t main_unpack_pattern = 0;
1212
1213 if (pix_cfg->input_mux == EXTERNAL_READ) {
1214 fe_cfg = &pix_cfg->fetch_engine_cfg;
1215 pr_debug("%s:VFE%d wd x ht buf = %d x %d, fe = %d x %d\n",
1216 __func__, vfe_dev->pdev->id, fe_cfg->buf_width,
1217 fe_cfg->buf_height,
1218 fe_cfg->fetch_width, fe_cfg->fetch_height);
1219
1220 vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
1221 VFE47_BUS_RD_CGC_OVERRIDE_BIT, 1);
1222
1223 temp = msm_camera_io_r(vfe_dev->vfe_base + 0x84);
1224 temp &= 0xFFFFFFFD;
1225 temp |= (1 << 1);
1226 msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
1227
1228 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
1229 (1 << 24), 0,
1230 MSM_ISP_IRQ_ENABLE);
1231
1232 temp = fe_cfg->fetch_height - 1;
1233 msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x308);
1234
1235 x_size_word = msm_isp_cal_word_per_line(
1236 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
1237 fe_cfg->buf_width);
1238 msm_camera_io_w((x_size_word - 1) << 16,
1239 vfe_dev->vfe_base + 0x30c);
1240
1241 x_size_word = msm_isp_cal_word_per_line(
1242 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
1243 fe_cfg->fetch_width);
1244 msm_camera_io_w(x_size_word << 16 |
1245 (temp & 0x3FFF) << 2 | VFE47_FETCH_BURST_LEN,
1246 vfe_dev->vfe_base + 0x310);
1247
1248 temp = ((fe_cfg->buf_width - 1) & 0x3FFF) << 16 |
1249 ((fe_cfg->buf_height - 1) & 0x3FFF);
1250 msm_camera_io_w(temp, vfe_dev->vfe_base + 0x314);
1251
1252 /* need to use formulae to calculate MAIN_UNPACK_PATTERN*/
1253 switch (vfe_dev->axi_data.src_info[VFE_PIX_0].input_format) {
1254 case V4L2_PIX_FMT_P16BGGR10:
1255 case V4L2_PIX_FMT_P16GBRG10:
1256 case V4L2_PIX_FMT_P16GRBG10:
1257 case V4L2_PIX_FMT_P16RGGB10:
1258 main_unpack_pattern = 0xB210;
1259 break;
1260 default:
1261 main_unpack_pattern = 0xF6543210;
1262 break;
1263 }
1264 msm_camera_io_w(main_unpack_pattern,
1265 vfe_dev->vfe_base + 0x318);
1266 msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x334);
1267
1268 temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
1269 temp |= 2 << 5;
1270 temp |= 128 << 8;
1271 temp |= (pix_cfg->pixel_pattern & 0x3);
1272 msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
1273
1274 } else {
1275 pr_err("%s: Invalid mux configuration - mux: %d", __func__,
1276 pix_cfg->input_mux);
1277 }
1278}
1279
1280void msm_vfe47_cfg_testgen(struct vfe_device *vfe_dev,
1281 struct msm_vfe_testgen_cfg *testgen_cfg)
1282{
1283 uint32_t temp;
1284 uint32_t bit_per_pixel = 0;
1285 uint32_t bpp_reg = 0;
1286 uint32_t bayer_pix_pattern_reg = 0;
1287 uint32_t unicolorbar_reg = 0;
1288 uint32_t unicolor_enb = 0;
1289
1290 bit_per_pixel = msm_isp_get_bit_per_pixel(
1291 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
1292
1293 switch (bit_per_pixel) {
1294 case 8:
1295 bpp_reg = 0x0;
1296 break;
1297 case 10:
1298 bpp_reg = 0x1;
1299 break;
1300 case 12:
1301 bpp_reg = 0x10;
1302 break;
1303 case 14:
1304 bpp_reg = 0x11;
1305 break;
1306 default:
1307 pr_err("%s: invalid bpp %d\n", __func__, bit_per_pixel);
1308 break;
1309 }
1310
1311 msm_camera_io_w(bpp_reg << 16 | testgen_cfg->burst_num_frame,
1312 vfe_dev->vfe_base + 0xC5C);
1313
1314 msm_camera_io_w(((testgen_cfg->lines_per_frame - 1) << 16) |
1315 (testgen_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0xC60);
1316
1317 temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
1318 temp |= (((testgen_cfg->h_blank) & 0x3FFF) << 8);
1319 temp |= (1 << 22);
1320 msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
1321
1322 msm_camera_io_w((1 << 16) | testgen_cfg->v_blank,
1323 vfe_dev->vfe_base + 0xC70);
1324
1325 switch (testgen_cfg->pixel_bayer_pattern) {
1326 case ISP_BAYER_RGRGRG:
1327 bayer_pix_pattern_reg = 0x0;
1328 break;
1329 case ISP_BAYER_GRGRGR:
1330 bayer_pix_pattern_reg = 0x1;
1331 break;
1332 case ISP_BAYER_BGBGBG:
1333 bayer_pix_pattern_reg = 0x10;
1334 break;
1335 case ISP_BAYER_GBGBGB:
1336 bayer_pix_pattern_reg = 0x11;
1337 break;
1338 default:
1339 pr_err("%s: invalid pix pattern %d\n",
1340 __func__, bit_per_pixel);
1341 break;
1342 }
1343
1344 if (testgen_cfg->color_bar_pattern == COLOR_BAR_8_COLOR) {
1345 unicolor_enb = 0x0;
1346 } else {
1347 unicolor_enb = 0x1;
1348 switch (testgen_cfg->color_bar_pattern) {
1349 case UNICOLOR_WHITE:
1350 unicolorbar_reg = 0x0;
1351 break;
1352 case UNICOLOR_YELLOW:
1353 unicolorbar_reg = 0x1;
1354 break;
1355 case UNICOLOR_CYAN:
1356 unicolorbar_reg = 0x10;
1357 break;
1358 case UNICOLOR_GREEN:
1359 unicolorbar_reg = 0x11;
1360 break;
1361 case UNICOLOR_MAGENTA:
1362 unicolorbar_reg = 0x100;
1363 break;
1364 case UNICOLOR_RED:
1365 unicolorbar_reg = 0x101;
1366 break;
1367 case UNICOLOR_BLUE:
1368 unicolorbar_reg = 0x110;
1369 break;
1370 case UNICOLOR_BLACK:
1371 unicolorbar_reg = 0x111;
1372 break;
1373 default:
1374 pr_err("%s: invalid colorbar %d\n",
1375 __func__, testgen_cfg->color_bar_pattern);
1376 break;
1377 }
1378 }
1379
1380 msm_camera_io_w((testgen_cfg->rotate_period << 8) |
1381 (bayer_pix_pattern_reg << 6) | (unicolor_enb << 4) |
1382 (unicolorbar_reg), vfe_dev->vfe_base + 0xC78);
1383}
1384
1385void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev,
1386 struct msm_vfe_pix_cfg *pix_cfg)
1387{
1388 uint16_t first_pixel, last_pixel, first_line, last_line;
1389 uint16_t epoch_line1;
1390 struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
1391 struct msm_vfe_testgen_cfg *testgen_cfg = &pix_cfg->testgen_cfg;
1392 uint32_t val, subsample_period, subsample_pattern;
1393 uint32_t irq_sub_period = 32;
1394 uint32_t frame_sub_period = 32;
1395 struct msm_vfe_camif_subsample_cfg *subsample_cfg =
1396 &pix_cfg->camif_cfg.subsample_cfg;
1397 uint16_t bus_sub_en = 0;
1398
1399 if (subsample_cfg->pixel_skip || subsample_cfg->line_skip)
1400 bus_sub_en = 1;
1401 else
1402 bus_sub_en = 0;
1403
1404 vfe_dev->dual_vfe_enable = camif_cfg->is_split;
1405
1406 msm_camera_io_w(pix_cfg->input_mux << 5 | pix_cfg->pixel_pattern,
1407 vfe_dev->vfe_base + 0x50);
1408
1409 first_pixel = camif_cfg->first_pixel;
1410 last_pixel = camif_cfg->last_pixel;
1411 first_line = camif_cfg->first_line;
1412 last_line = camif_cfg->last_line;
1413 epoch_line1 = camif_cfg->epoch_line1;
1414
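	/*
	 * Clamp EPOCH1 into a sane window: an unset or out-of-range value
	 * falls back to 50 lines before the last line, and anything more
	 * than 100 lines before the end is pulled up to last_line - 100.
	 */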
1415 if ((epoch_line1 <= 0) || (epoch_line1 > last_line))
1416 epoch_line1 = last_line - 50;
1417
1418 if ((last_line - epoch_line1) > 100)
1419 epoch_line1 = last_line - 100;
1420
1421 subsample_period = camif_cfg->subsample_cfg.irq_subsample_period;
1422 subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern;
1423
1424 if (pix_cfg->input_mux == TESTGEN)
1425 msm_camera_io_w((testgen_cfg->lines_per_frame - 1) << 16 |
1426 (testgen_cfg->pixels_per_line - 1),
1427 vfe_dev->vfe_base + 0x484);
1428 else
1429 msm_camera_io_w((camif_cfg->lines_per_frame - 1) << 16 |
1430 (camif_cfg->pixels_per_line - 1),
1431 vfe_dev->vfe_base + 0x484);
1432
1433 if (bus_sub_en) {
1434 val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
1435 val &= 0xFFFFFFDF;
1436 val = val | bus_sub_en << 5;
1437 msm_camera_io_w(val, vfe_dev->vfe_base + 0x47C);
1438 subsample_cfg->pixel_skip &= 0x0000FFFF;
1439 subsample_cfg->line_skip &= 0x0000FFFF;
1440 msm_camera_io_w((subsample_cfg->line_skip << 16) |
1441 subsample_cfg->pixel_skip, vfe_dev->vfe_base + 0x490);
1442 }
1443
1444
1445 msm_camera_io_w(first_pixel << 16 | last_pixel,
1446 vfe_dev->vfe_base + 0x488);
1447
1448 msm_camera_io_w(first_line << 16 | last_line,
1449 vfe_dev->vfe_base + 0x48C);
1450
1451 /* configure EPOCH0: 20 lines, and
1452 * configure EPOCH1: epoch_line1 before EOF
1453 */
1454 msm_camera_io_w_mb(0x140000 | epoch_line1,
1455 vfe_dev->vfe_base + 0x4A0);
1456 pr_debug("%s:%d: epoch_line1: %d\n",
1457 __func__, __LINE__, epoch_line1);
1458 msm_camera_io_w(((irq_sub_period - 1) << 8) | 0 << 5 |
1459 (frame_sub_period - 1), vfe_dev->vfe_base + 0x494);
1460 msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x498);
1461 msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x49C);
1462 if (subsample_period && subsample_pattern) {
1463 val = msm_camera_io_r(vfe_dev->vfe_base + 0x494);
1464 val &= 0xFFFFE0FF;
1465 val |= (subsample_period - 1) << 8;
1466 msm_camera_io_w(val, vfe_dev->vfe_base + 0x494);
1467 ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
1468 __func__, subsample_period, subsample_pattern);
1469
1470 val = subsample_pattern;
1471 msm_camera_io_w(val, vfe_dev->vfe_base + 0x49C);
1472 } else {
1473 msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x49C);
1474 }
1475
1476 if (subsample_cfg->first_pixel ||
1477 subsample_cfg->last_pixel ||
1478 subsample_cfg->first_line ||
1479 subsample_cfg->last_line) {
1480 msm_camera_io_w(
1481 subsample_cfg->first_pixel << 16 |
1482 subsample_cfg->last_pixel,
1483 vfe_dev->vfe_base + 0xCE4);
1484 msm_camera_io_w(
1485 subsample_cfg->first_line << 16 |
1486 subsample_cfg->last_line,
1487 vfe_dev->vfe_base + 0xCE8);
1488 val = msm_camera_io_r(
1489 vfe_dev->vfe_base + 0x47C);
1490 ISP_DBG("%s: camif raw crop enabled\n", __func__);
1491 val |= 1 << 22;
1492 msm_camera_io_w(val,
1493 vfe_dev->vfe_base + 0x47C);
1494 }
1495
1496 ISP_DBG("%s: camif raw op fmt %d\n",
1497 __func__, subsample_cfg->output_format);
	/* PDAF output can be sent in the formats below */
1499 val = msm_camera_io_r(vfe_dev->vfe_base + 0x88);
1500 switch (subsample_cfg->output_format) {
1501 case CAMIF_PLAIN_8:
1502 val |= PLAIN8 << 9;
1503 break;
1504 case CAMIF_PLAIN_16:
1505 val |= PLAIN16 << 9;
1506 break;
1507 case CAMIF_MIPI_RAW:
1508 val |= MIPI << 9;
1509 break;
1510 case CAMIF_QCOM_RAW:
1511 val |= QCOM << 9;
1512 break;
1513 default:
1514 break;
1515 }
1516 msm_camera_io_w(val, vfe_dev->vfe_base + 0x88);
1517
1518 val = msm_camera_io_r(vfe_dev->vfe_base + 0x46C);
1519 val |= camif_cfg->camif_input;
1520 msm_camera_io_w(val, vfe_dev->vfe_base + 0x46C);
1521}
1522
1523void msm_vfe47_cfg_input_mux(struct vfe_device *vfe_dev,
1524 struct msm_vfe_pix_cfg *pix_cfg)
1525{
1526 uint32_t core_cfg = 0;
1527 uint32_t val = 0;
1528
1529 core_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
1530 core_cfg &= 0xFFFFFF9F;
1531
1532 switch (pix_cfg->input_mux) {
1533 case CAMIF:
1534 core_cfg |= 0x0 << 5;
1535 msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
1536 msm_vfe47_cfg_camif(vfe_dev, pix_cfg);
1537 break;
1538 case TESTGEN:
1539 /* Change CGC override */
1540 val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
1541 val |= (1 << 31);
1542 msm_camera_io_w(val, vfe_dev->vfe_base + 0x3C);
1543
		/* CAMIF and TESTGEN will both go through CAMIF */
1545 core_cfg |= 0x1 << 5;
1546 msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
1547 msm_vfe47_cfg_camif(vfe_dev, pix_cfg);
1548 msm_vfe47_cfg_testgen(vfe_dev, &pix_cfg->testgen_cfg);
1549 break;
1550 case EXTERNAL_READ:
1551 core_cfg |= 0x2 << 5;
1552 msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
1553 msm_vfe47_cfg_fetch_engine(vfe_dev, pix_cfg);
1554 break;
1555 default:
1556 pr_err("%s: Unsupported input mux %d\n",
1557 __func__, pix_cfg->input_mux);
1558 break;
1559 }
1560}
1561
1562void msm_vfe47_configure_hvx(struct vfe_device *vfe_dev,
1563 uint8_t is_stream_on)
1564{
1565 uint32_t val;
1566 int rc = 0;
1567
1568 if (is_stream_on == vfe_dev->cur_hvx_state) {
1569 ISP_DBG("already in same hvx state\n");
1570 return;
1571 }
1572 if (vfe_dev->buf_mgr->secure_enable == SECURE_MODE) {
1573 pr_err("%s: Cannot configure hvx, secure_mode: %d\n",
1574 __func__,
1575 vfe_dev->buf_mgr->secure_enable);
1576 return;
1577 }
1578 if (!vfe_dev->hvx_clk) {
1579 pr_err("%s: no stream_clk\n", __func__);
1580 return;
1581 }
1582 if (is_stream_on) {
1583 /* Enable HVX */
1584 if (!vfe_dev->hvx_clk_state) {
1585 rc = msm_camera_clk_enable(&vfe_dev->pdev->dev,
1586 vfe_dev->hvx_clk_info, vfe_dev->hvx_clk,
1587 vfe_dev->num_hvx_clk, is_stream_on);
1588 if (rc) {
1589 pr_err("%s: stream_clk enable failed\n",
1590 __func__);
1591 return;
1592 }
1593 vfe_dev->hvx_clk_state = true;
1594 }
1595 val = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
1596 val |= (1 << 3);
1597 msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
1598 val &= 0xFF7FFFFF;
1599 if (vfe_dev->hvx_cmd == HVX_ROUND_TRIP)
1600 val |= (1 << 23);
1601 msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
1602 } else {
1603 /* Disable HVX */
1604 if (!vfe_dev->hvx_clk_state)
1605 return;
1606 rc = msm_camera_clk_enable(&vfe_dev->pdev->dev,
1607 vfe_dev->hvx_clk_info, vfe_dev->hvx_clk,
1608 vfe_dev->num_hvx_clk, is_stream_on);
1609 if (rc) {
1610 pr_err("%s: stream_clk disable failed\n",
1611 __func__);
1612 return;
1613 }
1614 vfe_dev->hvx_clk_state = false;
1615 val = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
1616 val &= 0xFFFFFFF7;
1617 msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
1618 }
1619 vfe_dev->cur_hvx_state = is_stream_on;
1620}
1621
1622void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
1623 enum msm_isp_camif_update_state update_state)
1624{
1625 uint32_t val;
1626 bool bus_en, vfe_en;
1627
1628 if (update_state == NO_UPDATE)
1629 return;
1630
1631 val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
1632 if (update_state == ENABLE_CAMIF) {
1633 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
1634 0x1F, 0x91,
1635 MSM_ISP_IRQ_ENABLE);
1636
1637 if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
1638 (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
1639 msm_vfe47_configure_hvx(vfe_dev, 1);
1640 else
1641 msm_vfe47_configure_hvx(vfe_dev, 0);
1642
1643 bus_en =
1644 ((vfe_dev->axi_data.
1645 src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
1646 vfe_en =
1647 ((vfe_dev->axi_data.
1648 src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
1649 val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
1650 val &= 0xFFFFFF3F;
1651 val = val | bus_en << 7 | vfe_en << 6;
1652 msm_camera_io_w(val, vfe_dev->vfe_base + 0x47C);
1653 msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x478);
1654 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x478);
1655 /* testgen GO*/
1656 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
1657 msm_camera_io_w(1, vfe_dev->vfe_base + 0xC58);
1658 } else if (update_state == DISABLE_CAMIF ||
1659 update_state == DISABLE_CAMIF_IMMEDIATELY) {
1660 uint32_t poll_val;
1661
1662 /* For testgen always halt on camif boundary */
1663 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
1664 update_state = DISABLE_CAMIF;
1665 /* turn off camif, violation and write master overwrite irq */
1666 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0, 0x91,
1667 MSM_ISP_IRQ_DISABLE);
1668 val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
1669 /* disable danger signal */
1670 msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
1671 msm_camera_io_w_mb((update_state == DISABLE_CAMIF ? 0x0 : 0x6),
1672 vfe_dev->vfe_base + 0x478);
1673 if (readl_poll_timeout_atomic(vfe_dev->vfe_base + 0x4A4,
1674 poll_val, poll_val & 0x80000000, 1000, 2000000))
1675 pr_err("%s: camif disable failed %x\n",
1676 __func__, poll_val);
1677 /* testgen OFF*/
1678 if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
1679 msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xC58);
1680
1681 if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
1682 (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
1683 msm_vfe47_configure_hvx(vfe_dev, 0);
1684
1685 }
1686}
1687
1688void msm_vfe47_cfg_rdi_reg(
1689 struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
1690 enum msm_vfe_input_src input_src)
1691{
1692 uint8_t rdi = input_src - VFE_RAW_0;
1693 uint32_t rdi_reg_cfg;
1694
1695 rdi_reg_cfg = msm_camera_io_r(
1696 vfe_dev->vfe_base + VFE47_RDI_BASE(rdi));
1697 rdi_reg_cfg &= 0x3;
1698 rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 1 << 2;
1699 msm_camera_io_w(
1700 rdi_reg_cfg, vfe_dev->vfe_base + VFE47_RDI_BASE(rdi));
1701}
1702
1703void msm_vfe47_axi_cfg_wm_reg(
1704 struct vfe_device *vfe_dev,
1705 struct msm_vfe_axi_stream *stream_info,
1706 uint8_t plane_idx)
1707{
1708 uint32_t val;
1709 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1710 uint32_t wm_base;
1711
1712 wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
1713 val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0x14);
1714 val &= ~0x2;
1715 if (stream_info->frame_based)
1716 val |= 0x2;
1717 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
1718 if (!stream_info->frame_based) {
1719 /* WR_IMAGE_SIZE */
1720 val = ((msm_isp_cal_word_per_line(
1721 stream_info->output_format,
1722 stream_info->plane_cfg[vfe_idx][plane_idx].
1723 output_width)+3)/4 - 1) << 16 |
1724 (stream_info->plane_cfg[vfe_idx][plane_idx].
1725 output_height - 1);
1726 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
1727 /* WR_BUFFER_CFG */
1728 val = VFE47_BURST_LEN |
1729 (stream_info->plane_cfg[vfe_idx][plane_idx].
1730 output_height - 1) <<
1731 2 |
1732 ((msm_isp_cal_word_per_line(stream_info->output_format,
1733 stream_info->plane_cfg[vfe_idx][plane_idx].
1734 output_stride)+1)/2) << 16;
1735 }
1736 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
1737 /* WR_IRQ_SUBSAMPLE_PATTERN */
1738 msm_camera_io_w(0xFFFFFFFF,
1739 vfe_dev->vfe_base + wm_base + 0x28);
1740}
1741
1742void msm_vfe47_axi_clear_wm_reg(
1743 struct vfe_device *vfe_dev,
1744 struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
1745{
1746 uint32_t val = 0;
1747 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1748 uint32_t wm_base;
1749
1750 wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
1751 /* WR_ADDR_CFG */
1752 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
1753 /* WR_IMAGE_SIZE */
1754 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
1755 /* WR_BUFFER_CFG */
1756 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
1757 /* WR_IRQ_SUBSAMPLE_PATTERN */
1758 msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x28);
1759}
1760
1761void msm_vfe47_axi_cfg_wm_xbar_reg(
1762 struct vfe_device *vfe_dev,
1763 struct msm_vfe_axi_stream *stream_info,
1764 uint8_t plane_idx)
1765{
1766 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1767 struct msm_vfe_axi_plane_cfg *plane_cfg;
1768 uint8_t wm;
1769 uint32_t xbar_cfg = 0;
1770 uint32_t xbar_reg_cfg = 0;
1771
1772 plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
1773 wm = stream_info->wm[vfe_idx][plane_idx];
1774 switch (stream_info->stream_src) {
1775 case PIX_VIDEO:
1776 case PIX_ENCODER:
1777 case PIX_VIEWFINDER: {
1778 if (plane_cfg->output_plane_format != CRCB_PLANE &&
1779 plane_cfg->output_plane_format != CBCR_PLANE) {
1780 /* SINGLE_STREAM_SEL */
1781 xbar_cfg |= plane_cfg->output_plane_format << 8;
1782 } else {
1783 switch (stream_info->output_format) {
1784 case V4L2_PIX_FMT_NV12:
1785 case V4L2_PIX_FMT_NV14:
1786 case V4L2_PIX_FMT_NV16:
1787 case V4L2_PIX_FMT_NV24:
1788 /* PAIR_STREAM_SWAP_CTRL */
1789 xbar_cfg |= 0x3 << 4;
1790 break;
1791 }
1792 xbar_cfg |= 0x1 << 2; /* PAIR_STREAM_EN */
1793 }
1794 if (stream_info->stream_src == PIX_VIEWFINDER)
1795 xbar_cfg |= 0x1; /* VIEW_STREAM_EN */
1796 else if (stream_info->stream_src == PIX_VIDEO)
1797 xbar_cfg |= 0x2;
1798 break;
1799 }
1800 case CAMIF_RAW:
1801 xbar_cfg = 0x300;
1802 break;
1803 case IDEAL_RAW:
1804 xbar_cfg = 0x400;
1805 break;
1806 case RDI_INTF_0:
1807 xbar_cfg = 0xC00;
1808 break;
1809 case RDI_INTF_1:
1810 xbar_cfg = 0xD00;
1811 break;
1812 case RDI_INTF_2:
1813 xbar_cfg = 0xE00;
1814 break;
1815 default:
1816 pr_err("%s: Invalid stream src\n", __func__);
1817 break;
1818 }
1819
1820 xbar_reg_cfg =
1821 msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
1822 xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
1823 xbar_reg_cfg |= (xbar_cfg << VFE47_XBAR_SHIFT(wm));
1824 msm_camera_io_w(xbar_reg_cfg,
1825 vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
1826}
1827
1828void msm_vfe47_axi_clear_wm_xbar_reg(
1829 struct vfe_device *vfe_dev,
1830 struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
1831{
1832 int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
1833 uint8_t wm;
1834 uint32_t xbar_reg_cfg = 0;
1835
1836 wm = stream_info->wm[vfe_idx][plane_idx];
1837 xbar_reg_cfg =
1838 msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
1839 xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
1840 msm_camera_io_w(xbar_reg_cfg,
1841 vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
1842}
1843
1844
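/*
 * Default UB allocation: each RDI write master gets a fixed slice of
 * min_wm_ub * 2, while the remaining UB is split among the active PIX
 * write masters in proportion to their image sizes, on top of a guaranteed
 * min_wm_ub per WM. Unused write masters get a zero entry.
 */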
1845void msm_vfe47_cfg_axi_ub_equal_default(
1846 struct vfe_device *vfe_dev,
1847 enum msm_vfe_input_src frame_src)
1848{
1849 int i;
1850 uint32_t ub_offset = 0;
1851 struct msm_vfe_axi_shared_data *axi_data =
1852 &vfe_dev->axi_data;
1853 uint32_t total_image_size = 0;
1854 uint8_t num_used_wms = 0;
1855 uint32_t prop_size = 0;
1856 uint32_t wm_ub_size;
1857 uint64_t delta;
1858
1859 if (frame_src == VFE_PIX_0) {
1860 for (i = 0; i < axi_data->hw_info->num_wm; i++) {
1861 if (axi_data->free_wm[i] &&
1862 SRC_TO_INTF(
1863 HANDLE_TO_IDX(axi_data->free_wm[i])) ==
1864 VFE_PIX_0) {
1865 num_used_wms++;
1866 total_image_size +=
1867 axi_data->wm_image_size[i];
1868 }
1869 }
1870 ub_offset = (axi_data->hw_info->num_rdi * 2) *
1871 axi_data->hw_info->min_wm_ub;
1872 prop_size = vfe_dev->hw_info->vfe_ops.axi_ops.
1873 get_ub_size(vfe_dev) -
1874 axi_data->hw_info->min_wm_ub * (num_used_wms +
1875 axi_data->hw_info->num_rdi * 2);
1876 }
1877
1878 for (i = 0; i < axi_data->hw_info->num_wm; i++) {
1879 if (!axi_data->free_wm[i]) {
1880 msm_camera_io_w(0,
1881 vfe_dev->vfe_base +
1882 vfe_dev->hw_info->vfe_ops.axi_ops.
1883 ub_reg_offset(vfe_dev, i));
1884 continue;
1885 }
1886
1887 if (frame_src != SRC_TO_INTF(
1888 HANDLE_TO_IDX(axi_data->free_wm[i])))
1889 continue;
1890
1891 if (frame_src == VFE_PIX_0) {
1892 if (total_image_size) {
1893 delta = (uint64_t)axi_data->wm_image_size[i] *
1894 (uint64_t)prop_size;
1895 do_div(delta, total_image_size);
1896 wm_ub_size = axi_data->hw_info->min_wm_ub +
1897 (uint32_t)delta;
1898 msm_camera_io_w(ub_offset << 16 |
1899 (wm_ub_size - 1),
1900 vfe_dev->vfe_base +
1901 vfe_dev->hw_info->vfe_ops.axi_ops.
1902 ub_reg_offset(vfe_dev, i));
1903 ub_offset += wm_ub_size;
1904 } else {
1905 pr_err("%s: image size is zero\n", __func__);
1906 }
1907 } else {
1908 uint32_t rdi_ub_offset;
1909 int plane;
1910 int vfe_idx;
1911 struct msm_vfe_axi_stream *stream_info;
1912
1913 stream_info = msm_isp_get_stream_common_data(vfe_dev,
1914 HANDLE_TO_IDX(axi_data->free_wm[i]));
1915 if (!stream_info) {
1916				pr_err("%s: stream_info is NULL!\n", __func__);
1917 return;
1918 }
1919 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
1920 stream_info);
1921 for (plane = 0; plane < stream_info->num_planes;
1922 plane++)
1923 if (stream_info->wm[vfe_idx][plane] ==
1924 axi_data->free_wm[i])
1925 break;
1926
1927 rdi_ub_offset = (SRC_TO_INTF(
1928 HANDLE_TO_IDX(axi_data->free_wm[i])) -
1929 VFE_RAW_0) *
1930 axi_data->hw_info->min_wm_ub * 2;
1931 wm_ub_size = axi_data->hw_info->min_wm_ub * 2;
1932 msm_camera_io_w(rdi_ub_offset << 16 | (wm_ub_size - 1),
1933 vfe_dev->vfe_base +
1934 vfe_dev->hw_info->vfe_ops.axi_ops.
1935 ub_reg_offset(vfe_dev, i));
1936 }
1937 }
1938}
1939
1940void msm_vfe47_cfg_axi_ub_equal_slicing(
1941 struct vfe_device *vfe_dev)
1942{
1943 int i;
1944 uint32_t ub_offset = 0;
1945 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
1946 uint32_t ub_equal_slice = 0;
1947
1948 ub_equal_slice = vfe_dev->hw_info->vfe_ops.axi_ops.
1949 get_ub_size(vfe_dev) /
1950 axi_data->hw_info->num_wm;
1951 for (i = 0; i < axi_data->hw_info->num_wm; i++) {
1952 msm_camera_io_w(ub_offset << 16 | (ub_equal_slice - 1),
1953 vfe_dev->vfe_base +
1954 vfe_dev->hw_info->vfe_ops.axi_ops.
1955 ub_reg_offset(vfe_dev, i));
1956 ub_offset += ub_equal_slice;
1957 }
1958}
1959
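/*
 * Note: the policy is forced to MSM_WM_UB_CFG_DEFAULT just below, so the
 * equal-slicing branch is effectively never taken on this target.
 */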
1960void msm_vfe47_cfg_axi_ub(struct vfe_device *vfe_dev,
1961 enum msm_vfe_input_src frame_src)
1962{
1963 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
1964
1965 axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
1966 if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
1967 msm_vfe47_cfg_axi_ub_equal_slicing(vfe_dev);
1968 else
1969 msm_vfe47_cfg_axi_ub_equal_default(vfe_dev, frame_src);
1970}
1971
1972void msm_vfe47_read_wm_ping_pong_addr(
1973 struct vfe_device *vfe_dev)
1974{
1975 msm_camera_io_dump(vfe_dev->vfe_base +
1976 (VFE47_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
1977}
1978
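/*
 * Write a ping/pong buffer address for a write master. The register at
 * offset +0x4 presumably holds the buffer upper bound; it is set to
 * paddr + buf_size rounded down to a 32-byte boundary.
 */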
1979void msm_vfe47_update_ping_pong_addr(
1980 void __iomem *vfe_base,
1981 uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
1982 int32_t buf_size)
1983{
1984 uint32_t paddr32 = (paddr & 0xFFFFFFFF);
1985 uint32_t paddr32_max = 0;
1986
1987 if (buf_size < 0)
1988 buf_size = 0;
1989
1990 paddr32_max = (paddr + buf_size) & 0xFFFFFFE0;
1991
1992 msm_camera_io_w(paddr32, vfe_base +
1993 VFE47_PING_PONG_BASE(wm_idx, pingpong_bit));
1994 msm_camera_io_w(paddr32_max, vfe_base +
1995 VFE47_PING_PONG_BASE(wm_idx, pingpong_bit) + 0x4);
1996
1997}
1998
1999void msm_vfe47_set_halt_restart_mask(struct vfe_device *vfe_dev)
2000{
2001 msm_vfe47_config_irq(vfe_dev, BIT(31), BIT(8), MSM_ISP_IRQ_SET);
2002}
2003
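/*
 * Halt the AXI bus bridge. When blocking, wait up to 500 ms for the
 * halt-complete interrupt; otherwise just issue the halt request. In both
 * cases any pending stream and stats updates are completed with fake
 * signals so that waiters are released.
 */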
2004int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
2005 uint32_t blocking)
2006{
2007 int rc = 0;
2008 enum msm_vfe_input_src i;
2009 uint32_t val = 0;
2010 struct msm_isp_timestamp ts;
2011	unsigned long flags;
2012
2013 val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
2014 val |= 0x1;
2015 msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
2016
2017 /* Keep only halt and reset mask */
2018 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2019 (1 << 31), (1 << 8),
2020 MSM_ISP_IRQ_SET);
2021
2022 if (atomic_read(&vfe_dev->error_info.overflow_state)
2023 == OVERFLOW_DETECTED)
2024 pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n",
2025 __func__, vfe_dev->pdev->id, blocking);
2026
2027 if (blocking) {
2028		spin_lock_irqsave(&vfe_dev->halt_completion_lock, flags);
2029		init_completion(&vfe_dev->halt_complete);
2030		spin_unlock_irqrestore(&vfe_dev->halt_completion_lock, flags);
2031		/* Halt AXI Bus Bridge */
2032 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
2033 rc = wait_for_completion_interruptible_timeout(
2034 &vfe_dev->halt_complete, msecs_to_jiffies(500));
2035 if (rc <= 0)
2036 pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
2037 vfe_dev->pdev->id, rc);
2038
2039 } else {
2040 /* Halt AXI Bus Bridge */
2041 msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
2042 }
2043
2044 msm_isp_get_timestamp(&ts, vfe_dev);
2045 for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
2046		/* if any stream is waiting for update, signal a fake completion */
2047 msm_isp_axi_stream_update(vfe_dev, i, &ts);
2048 msm_isp_axi_stream_update(vfe_dev, i, &ts);
2049 }
2050
2051 msm_isp_stats_stream_update(vfe_dev);
2052 msm_isp_stats_stream_update(vfe_dev);
2053
2054 return rc;
2055}
2056
2057void msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
2058 uint32_t blocking, uint32_t enable_camif)
2059{
2060 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2061 vfe_dev->recovery_irq0_mask,
2062 vfe_dev->recovery_irq1_mask,
2063 MSM_ISP_IRQ_ENABLE);
2064 /* Start AXI */
2065 msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400);
2066
2067 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
2068 memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2069 atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
2070 if (enable_camif)
2071 vfe_dev->hw_info->vfe_ops.core_ops.
2072 update_camif_state(vfe_dev, ENABLE_CAMIF);
2073}
2074
2075uint32_t msm_vfe47_get_wm_mask(
2076 uint32_t irq_status0, uint32_t irq_status1)
2077{
2078 return (irq_status0 >> 8) & 0x7F;
2079}
2080
2081uint32_t msm_vfe47_get_comp_mask(
2082 uint32_t irq_status0, uint32_t irq_status1)
2083{
2084 return (irq_status0 >> 25) & 0xF;
2085}
2086
2087uint32_t msm_vfe47_get_pingpong_status(
2088 struct vfe_device *vfe_dev)
2089{
2090 return msm_camera_io_r(vfe_dev->vfe_base + 0x338);
2091}
2092
2093int msm_vfe47_get_stats_idx(enum msm_isp_stats_type stats_type)
2094{
2095	/* idx used for composite; needs to be mapped to irq status */
2096 switch (stats_type) {
2097 case MSM_ISP_STATS_HDR_BE:
2098 return STATS_COMP_IDX_HDR_BE;
2099 case MSM_ISP_STATS_BG:
2100 return STATS_COMP_IDX_BG;
2101 case MSM_ISP_STATS_BF:
2102 return STATS_COMP_IDX_BF;
2103 case MSM_ISP_STATS_HDR_BHIST:
2104 return STATS_COMP_IDX_HDR_BHIST;
2105 case MSM_ISP_STATS_RS:
2106 return STATS_COMP_IDX_RS;
2107 case MSM_ISP_STATS_CS:
2108 return STATS_COMP_IDX_CS;
2109 case MSM_ISP_STATS_IHIST:
2110 return STATS_COMP_IDX_IHIST;
2111 case MSM_ISP_STATS_BHIST:
2112 return STATS_COMP_IDX_BHIST;
2113 case MSM_ISP_STATS_AEC_BG:
2114 return STATS_COMP_IDX_AEC_BG;
2115 default:
2116 pr_err("%s: Invalid stats type\n", __func__);
2117 return -EINVAL;
2118 }
2119}
2120
2121int msm_vfe47_stats_check_streams(
2122 struct msm_vfe_stats_stream *stream_info)
2123{
2124 return 0;
2125}
2126
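/*
 * Add stats types to, or remove them from, a composite group. The composite
 * mask register at 0x78 carries one 16-bit field per group, and the matching
 * composite-done interrupt (irq0 bit 29 + group index) is enabled or
 * disabled together with it.
 */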
2127void msm_vfe47_stats_cfg_comp_mask(
2128 struct vfe_device *vfe_dev, uint32_t stats_mask,
2129 uint8_t request_comp_index, uint8_t enable)
2130{
2131 uint32_t comp_mask_reg;
2132 atomic_t *stats_comp_mask;
2133 struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
2134
2135 if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
2136 return;
2137
2138 if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
2139		pr_err("%s: comp mask index %d exceeds max %d\n",
2140			__func__, request_comp_index,
2141			MAX_NUM_STATS_COMP_MASK);
2142 return;
2143 }
2144
2145 if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
2146 MAX_NUM_STATS_COMP_MASK) {
2147		pr_err("%s: num of comp masks %d exceeds max %d\n",
2148 __func__,
2149 vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
2150 MAX_NUM_STATS_COMP_MASK);
2151 return;
2152 }
2153
2154 stats_mask = stats_mask & 0x1FF;
2155
2156 stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
2157 comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x78);
2158
2159 if (enable) {
2160 comp_mask_reg |= stats_mask << (request_comp_index * 16);
2161 atomic_set(stats_comp_mask, stats_mask |
2162 atomic_read(stats_comp_mask));
2163 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2164 1 << (29 + request_comp_index),
2165 0, MSM_ISP_IRQ_ENABLE);
2166 } else {
2167 if (!(atomic_read(stats_comp_mask) & stats_mask))
2168 return;
2169
2170 atomic_set(stats_comp_mask,
2171 ~stats_mask & atomic_read(stats_comp_mask));
2172 comp_mask_reg &= ~(stats_mask << (request_comp_index * 16));
2173 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2174 1 << (29 + request_comp_index),
2175 0, MSM_ISP_IRQ_DISABLE);
2176 }
2177
2178 msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
2179
2180 ISP_DBG("%s: comp_mask_reg: %x comp mask0 %x mask1: %x\n",
2181 __func__, comp_mask_reg,
2182 atomic_read(&stats_data->stats_comp_mask[0]),
2183 atomic_read(&stats_data->stats_comp_mask[1]));
2184
2185}
2186
2187void msm_vfe47_stats_cfg_wm_irq_mask(
2188 struct vfe_device *vfe_dev,
2189 struct msm_vfe_stats_stream *stream_info)
2190{
2191 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
2192 stream_info);
2193
2194 switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
2195 case STATS_COMP_IDX_AEC_BG:
2196 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2197 1 << 15, 1 << 24, MSM_ISP_IRQ_ENABLE);
2198 break;
2199 case STATS_COMP_IDX_HDR_BE:
2200 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2201 1 << 16, 1 << 16, MSM_ISP_IRQ_ENABLE);
2202 break;
2203 case STATS_COMP_IDX_BG:
2204 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2205 1 << 17, 1 << 17, MSM_ISP_IRQ_ENABLE);
2206 break;
2207 case STATS_COMP_IDX_BF:
2208 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2209 1 << 18, 1 << 26 | 1 << 18,
2210 MSM_ISP_IRQ_ENABLE);
2211 break;
2212 case STATS_COMP_IDX_HDR_BHIST:
2213 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2214 1 << 19, 1 << 19, MSM_ISP_IRQ_ENABLE);
2215 break;
2216 case STATS_COMP_IDX_RS:
2217 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2218 1 << 20, 1 << 20, MSM_ISP_IRQ_ENABLE);
2219 break;
2220 case STATS_COMP_IDX_CS:
2221 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2222 1 << 21, 1 << 21, MSM_ISP_IRQ_ENABLE);
2223 break;
2224 case STATS_COMP_IDX_IHIST:
2225 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2226 1 << 22, 1 << 22, MSM_ISP_IRQ_ENABLE);
2227 break;
2228 case STATS_COMP_IDX_BHIST:
2229 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2230 1 << 23, 1 << 23, MSM_ISP_IRQ_ENABLE);
2231 break;
2232 default:
2233 pr_err("%s: Invalid stats idx %d\n", __func__,
2234 STATS_IDX(stream_info->stream_handle[vfe_idx]));
2235 }
2236}
2237
2238void msm_vfe47_stats_clear_wm_irq_mask(
2239 struct vfe_device *vfe_dev,
2240 struct msm_vfe_stats_stream *stream_info)
2241{
2242 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
2243 stream_info);
2244
2245 switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
2246 case STATS_COMP_IDX_AEC_BG:
2247 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2248 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
2249 break;
2250 case STATS_COMP_IDX_HDR_BE:
2251 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2252 1 << 16, 0, MSM_ISP_IRQ_DISABLE);
2253 break;
2254 case STATS_COMP_IDX_BG:
2255 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2256 1 << 17, 0, MSM_ISP_IRQ_DISABLE);
2257 break;
2258 case STATS_COMP_IDX_BF:
2259 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2260 1 << 18, 1 << 26,
2261 MSM_ISP_IRQ_DISABLE);
2262 break;
2263 case STATS_COMP_IDX_HDR_BHIST:
2264 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2265 1 << 19, 0, MSM_ISP_IRQ_DISABLE);
2266 break;
2267 case STATS_COMP_IDX_RS:
2268 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2269 1 << 20, 0, MSM_ISP_IRQ_DISABLE);
2270 break;
2271 case STATS_COMP_IDX_CS:
2272 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2273 1 << 21, 0, MSM_ISP_IRQ_DISABLE);
2274 break;
2275 case STATS_COMP_IDX_IHIST:
2276 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2277 1 << 22, 0, MSM_ISP_IRQ_DISABLE);
2278 break;
2279 case STATS_COMP_IDX_BHIST:
2280 vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
2281 1 << 23, 0, MSM_ISP_IRQ_DISABLE);
2282 break;
2283 default:
2284 pr_err("%s: Invalid stats idx %d\n", __func__,
2285 STATS_IDX(stream_info->stream_handle[vfe_idx]));
2286 }
2287}
2288
2289void msm_vfe47_stats_cfg_wm_reg(
2290 struct vfe_device *vfe_dev,
2291 struct msm_vfe_stats_stream *stream_info)
2292{
2293 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
2294 stream_info);
2295 int stats_idx;
2296 uint32_t stats_base;
2297
2298 stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
2299 stats_base = VFE47_STATS_BASE(stats_idx);
2300
2301 /* WR_ADDR_CFG */
2302 msm_camera_io_w((stream_info->framedrop_period - 1) << 2,
2303 vfe_dev->vfe_base + stats_base + 0x10);
2304 /* WR_IRQ_FRAMEDROP_PATTERN */
2305 msm_camera_io_w(stream_info->framedrop_pattern,
2306 vfe_dev->vfe_base + stats_base + 0x18);
2307 /* WR_IRQ_SUBSAMPLE_PATTERN */
2308 msm_camera_io_w(0xFFFFFFFF,
2309 vfe_dev->vfe_base + stats_base + 0x1C);
2310}
2311
2312void msm_vfe47_stats_clear_wm_reg(
2313 struct vfe_device *vfe_dev,
2314 struct msm_vfe_stats_stream *stream_info)
2315{
2316 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
2317 stream_info);
2318 uint32_t val = 0;
2319 int stats_idx;
2320 uint32_t stats_base;
2321
2322 stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
2323 stats_base = VFE47_STATS_BASE(stats_idx);
2324
2325 /* WR_ADDR_CFG */
2326 msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
2327 /* WR_IRQ_FRAMEDROP_PATTERN */
2328 msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x18);
2329 /* WR_IRQ_SUBSAMPLE_PATTERN */
2330 msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x1C);
2331}
2332
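/*
 * Carve the stats UB out of the top of the per-VFE UB: walk downwards from
 * the UB size and give every stats type a fixed slice of 16 (in the units
 * used by the UB registers), programming burst length, offset and size into
 * each stats UB_CFG register.
 */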
2333void msm_vfe47_stats_cfg_ub(struct vfe_device *vfe_dev)
2334{
2335 int i;
2336 uint32_t ub_offset = 0;
2337 uint32_t ub_size[VFE47_NUM_STATS_TYPE] = {
2338 16, /* MSM_ISP_STATS_HDR_BE */
2339 16, /* MSM_ISP_STATS_BG */
2340 16, /* MSM_ISP_STATS_BF */
2341 16, /* MSM_ISP_STATS_HDR_BHIST */
2342 16, /* MSM_ISP_STATS_RS */
2343 16, /* MSM_ISP_STATS_CS */
2344 16, /* MSM_ISP_STATS_IHIST */
2345 16, /* MSM_ISP_STATS_BHIST */
2346 16, /* MSM_ISP_STATS_AEC_BG */
2347 };
2348 if (vfe_dev->pdev->id == ISP_VFE1)
2349 ub_offset = VFE47_UB_SIZE_VFE1;
2350 else if (vfe_dev->pdev->id == ISP_VFE0)
2351 ub_offset = VFE47_UB_SIZE_VFE0;
2352 else
2353 pr_err("%s: incorrect VFE device\n", __func__);
2354
2355 for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
2356 ub_offset -= ub_size[i];
2357 msm_camera_io_w(VFE47_STATS_BURST_LEN << 30 |
2358 ub_offset << 16 | (ub_size[i] - 1),
2359 vfe_dev->vfe_base + VFE47_STATS_BASE(i) + 0x14);
2360 }
2361}
2362
2363void msm_vfe47_stats_update_cgc_override(struct vfe_device *vfe_dev,
2364 uint32_t stats_mask, uint8_t enable)
2365{
2366 int i;
2367 uint32_t module_cfg, cgc_mask = 0;
2368
2369 for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
2370 if ((stats_mask >> i) & 0x1) {
2371 switch (i) {
2372 case STATS_COMP_IDX_HDR_BE:
2373 cgc_mask |= 1;
2374 break;
2375 case STATS_COMP_IDX_BG:
2376 cgc_mask |= (1 << 3);
2377 break;
2378 case STATS_COMP_IDX_BHIST:
2379 cgc_mask |= (1 << 4);
2380 break;
2381 case STATS_COMP_IDX_RS:
2382 cgc_mask |= (1 << 5);
2383 break;
2384 case STATS_COMP_IDX_CS:
2385 cgc_mask |= (1 << 6);
2386 break;
2387 case STATS_COMP_IDX_IHIST:
2388 cgc_mask |= (1 << 7);
2389 break;
2390 case STATS_COMP_IDX_AEC_BG:
2391 cgc_mask |= (1 << 8);
2392 break;
2393 case STATS_COMP_IDX_BF:
2394 cgc_mask |= (1 << 2);
2395 break;
2396 case STATS_COMP_IDX_HDR_BHIST:
2397 cgc_mask |= (1 << 1);
2398 break;
2399 default:
2400 pr_err("%s: Invalid stats mask\n", __func__);
2401 return;
2402 }
2403 }
2404 }
2405
2406 /* CGC override: enforce BAF for DMI */
2407 module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
2408 if (enable)
2409 module_cfg |= cgc_mask;
2410 else
2411 module_cfg &= ~cgc_mask;
2412 msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x30);
2413}
2414
2415bool msm_vfe47_is_module_cfg_lock_needed(
2416 uint32_t reg_offset)
2417{
2418 return false;
2419}
2420
2421void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
2422 uint32_t stats_mask, uint8_t enable)
2423{
2424 int i;
2425 uint32_t module_cfg, module_cfg_mask = 0;
2426
2427	/* BF stats involve DMI cfg, ignore */
2428 for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
2429 if ((stats_mask >> i) & 0x1) {
2430 switch (i) {
2431 case STATS_COMP_IDX_HDR_BE:
2432 module_cfg_mask |= 1;
2433 break;
2434 case STATS_COMP_IDX_HDR_BHIST:
2435 module_cfg_mask |= 1 << 1;
2436 break;
2437 case STATS_COMP_IDX_BF:
2438 module_cfg_mask |= 1 << 2;
2439 break;
2440 case STATS_COMP_IDX_BG:
2441 module_cfg_mask |= 1 << 3;
2442 break;
2443 case STATS_COMP_IDX_BHIST:
2444 module_cfg_mask |= 1 << 4;
2445 break;
2446 case STATS_COMP_IDX_RS:
2447 module_cfg_mask |= 1 << 5;
2448 break;
2449 case STATS_COMP_IDX_CS:
2450 module_cfg_mask |= 1 << 6;
2451 break;
2452 case STATS_COMP_IDX_IHIST:
2453 module_cfg_mask |= 1 << 7;
2454 break;
2455 case STATS_COMP_IDX_AEC_BG:
2456 module_cfg_mask |= 1 << 8;
2457 break;
2458 default:
2459 pr_err("%s: Invalid stats mask\n", __func__);
2460 return;
2461 }
2462 }
2463 }
2464
2465 module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
2466 if (enable)
2467 module_cfg |= module_cfg_mask;
2468 else
2469 module_cfg &= ~module_cfg_mask;
2470
2471 msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x44);
2472 /* enable wm if needed */
2473 if (vfe_dev->hw_info->vfe_ops.stats_ops.enable_stats_wm)
2474 vfe_dev->hw_info->vfe_ops.stats_ops.enable_stats_wm(vfe_dev,
2475 stats_mask, enable);
2476}
2477
2478void msm_vfe47_stats_update_ping_pong_addr(
2479 struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
2480 uint32_t pingpong_status, dma_addr_t paddr, uint32_t buf_size)
2481{
2482 void __iomem *vfe_base = vfe_dev->vfe_base;
2483 int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
2484 stream_info);
2485 uint32_t paddr32 = (paddr & 0xFFFFFFFF);
2486 uint32_t paddr32_max;
2487 int stats_idx;
2488
2489 stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
2490
2491 msm_camera_io_w(paddr32, vfe_base +
2492 VFE47_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
2493
2494 paddr32_max = (paddr + buf_size) & 0xFFFFFFE0;
2495 msm_camera_io_w(paddr32_max, vfe_base +
2496 VFE47_STATS_PING_PONG_BASE(stats_idx, pingpong_status) + 0x4);
2497}
2498
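/*
 * Map raw stats IRQ bits to a bitmap of composite indices via
 * stats_irq_map_comp_mask. The BF early-done bit (irq0 bit 18) is masked out
 * here and handled separately; BF completion is taken from irq1 bit 26
 * instead.
 */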
2499uint32_t msm_vfe47_stats_get_wm_mask(
2500 uint32_t irq_status0, uint32_t irq_status1)
2501{
2502 /* TODO: define bf early done irq in status_0 and
2503 * bf pingpong done in status_1
2504 */
2505 uint32_t comp_mapped_irq_mask = 0;
2506 int i = 0;
2507
2508 /* remove early done and handle separately,
2509 * add bf idx on status 1
2510 */
2511 irq_status0 &= ~(1 << 18);
2512
2513 for (i = 0; i < VFE47_NUM_STATS_TYPE; i++)
2514 if ((irq_status0 >> stats_irq_map_comp_mask[i]) & 0x1)
2515 comp_mapped_irq_mask |= (1 << i);
2516 if ((irq_status1 >> 26) & 0x1)
2517 comp_mapped_irq_mask |= (1 << STATS_COMP_IDX_BF);
2518
2519 return comp_mapped_irq_mask;
2520}
2521
2522uint32_t msm_vfe47_stats_get_comp_mask(
2523 uint32_t irq_status0, uint32_t irq_status1)
2524{
2525 return (irq_status0 >> 29) & 0x3;
2526}
2527
2528uint32_t msm_vfe47_stats_get_frame_id(
2529 struct vfe_device *vfe_dev)
2530{
2531 return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
2532}
2533
2534void msm_vfe47_deinit_bandwidth_mgr(
2535 struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
2536{
2537 msm_bus_scale_client_update_request(
2538 isp_bandwidth_mgr->bus_client, 0);
2539 msm_bus_scale_unregister_client(isp_bandwidth_mgr->bus_client);
2540 isp_bandwidth_mgr->bus_client = 0;
2541}
2542
2543int msm_vfe47_init_bandwidth_mgr(struct vfe_device *vfe_dev,
2544 struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
2545{
2546 int rc = 0;
2547
2548 isp_bandwidth_mgr->bus_client =
2549 msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
2550 if (!isp_bandwidth_mgr->bus_client) {
2551 pr_err("%s: client register failed\n", __func__);
2552 return -EINVAL;
2553 }
2554 isp_bandwidth_mgr->bus_vector_active_idx = 1;
2555 rc = msm_bus_scale_client_update_request(
2556 isp_bandwidth_mgr->bus_client,
2557 isp_bandwidth_mgr->bus_vector_active_idx);
2558 if (rc)
2559 msm_vfe47_deinit_bandwidth_mgr(isp_bandwidth_mgr);
2560 return rc;
2561}
2562
2563int msm_vfe47_update_bandwidth(
2564 struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
2565{
2566 int i;
2567 uint64_t ab = 0;
2568 uint64_t ib = 0;
2569 struct msm_bus_paths *path = NULL;
2570
2571 ALT_VECTOR_IDX(isp_bandwidth_mgr->bus_vector_active_idx);
2572 path = &(msm_isp_bus_client_pdata.usecase[
2573 isp_bandwidth_mgr->bus_vector_active_idx]);
2574 path->vectors[0].ab = 0;
2575 path->vectors[0].ib = 0;
2576 for (i = 0; i < MAX_ISP_CLIENT; i++) {
2577 if (isp_bandwidth_mgr->client_info[i].active) {
2578 path->vectors[0].ab +=
2579 isp_bandwidth_mgr->client_info[i].ab;
2580 path->vectors[0].ib +=
2581 isp_bandwidth_mgr->client_info[i].ib;
2582 ab += isp_bandwidth_mgr->client_info[i].ab;
2583 ib += isp_bandwidth_mgr->client_info[i].ib;
2584 }
2585 }
2586 msm_bus_scale_client_update_request(isp_bandwidth_mgr->bus_client,
2587 isp_bandwidth_mgr->bus_vector_active_idx);
2588 /* Insert into circular buffer */
2589 msm_isp_update_req_history(isp_bandwidth_mgr->bus_client,
2590 ab, ib,
2591 isp_bandwidth_mgr->client_info,
2592 sched_clock());
2593 return 0;
2594}
2595
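/*
 * Fetch the VFE clocks from the device tree. If camss_vfe_stream_clk (the
 * HVX stream clock) is present it is moved to the end of the clock arrays so
 * it can be handled separately from the normal clocks, and the index of
 * vfe_clk_src is recorded for rate scaling.
 */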
2596int msm_vfe47_get_clks(struct vfe_device *vfe_dev)
2597{
2598 int i, rc;
2599 struct clk *stream_clk;
2600 struct msm_cam_clk_info clk_info;
2601
2602 rc = msm_camera_get_clk_info(vfe_dev->pdev, &vfe_dev->vfe_clk_info,
2603 &vfe_dev->vfe_clk, &vfe_dev->num_clk);
2604 if (rc)
2605 return rc;
2606
2607 vfe_dev->num_norm_clk = vfe_dev->num_clk;
2608 for (i = 0; i < vfe_dev->num_clk; i++) {
2609 if (strcmp(vfe_dev->vfe_clk_info[i].clk_name,
2610 "camss_vfe_stream_clk") == 0) {
2611 stream_clk = vfe_dev->vfe_clk[i];
2612 clk_info = vfe_dev->vfe_clk_info[i];
2613 vfe_dev->num_hvx_clk = 1;
2614 vfe_dev->num_norm_clk = vfe_dev->num_clk - 1;
2615 break;
2616 }
2617 }
2618 if (i >= vfe_dev->num_clk)
2619 pr_err("%s: cannot find camss_vfe_stream_clk\n", __func__);
2620 else {
2621		/* Switch stream_clk to the last element */
2622 for (; i < vfe_dev->num_clk - 1; i++) {
2623 vfe_dev->vfe_clk[i] = vfe_dev->vfe_clk[i+1];
2624 vfe_dev->vfe_clk_info[i] = vfe_dev->vfe_clk_info[i+1];
2625 }
2626 vfe_dev->vfe_clk_info[vfe_dev->num_clk-1] = clk_info;
2627 vfe_dev->vfe_clk[vfe_dev->num_clk-1] = stream_clk;
2628 vfe_dev->hvx_clk_info =
2629 &vfe_dev->vfe_clk_info[vfe_dev->num_clk-1];
2630 vfe_dev->hvx_clk = &vfe_dev->vfe_clk[vfe_dev->num_clk-1];
2631 vfe_dev->hvx_clk_state = false;
2632 }
2633
2634 for (i = 0; i < vfe_dev->num_clk; i++) {
2635 if (strcmp(vfe_dev->vfe_clk_info[i].clk_name,
2636 "vfe_clk_src") == 0)
2637 vfe_dev->hw_info->vfe_clk_idx = i;
2638 }
2639 return 0;
2640}
2641
2642void msm_vfe47_put_clks(struct vfe_device *vfe_dev)
2643{
2644 msm_camera_put_clk_info(vfe_dev->pdev, &vfe_dev->vfe_clk_info,
2645 &vfe_dev->vfe_clk, vfe_dev->num_clk);
2646
2647 vfe_dev->num_clk = 0;
2648 vfe_dev->hvx_clk = NULL;
2649 vfe_dev->hvx_clk_info = NULL;
2650 vfe_dev->num_hvx_clk = 0;
2651 vfe_dev->num_norm_clk = 0;
2652}
2653
2654int msm_vfe47_enable_clks(struct vfe_device *vfe_dev, int enable)
2655{
2656 return msm_camera_clk_enable(&vfe_dev->pdev->dev,
2657 vfe_dev->vfe_clk_info,
2658 vfe_dev->vfe_clk, vfe_dev->num_norm_clk, enable);
2659}
2660
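/*
 * Set the VFE core clock rate. When a cx_ipeak handle exists, the ipeak vote
 * is raised before crossing up through the nominal rate and dropped after
 * crossing back below it, so the vote is held only while running at nominal
 * or above.
 */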
2661int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
2662{
2663 int rc = 0;
2664 int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
2665 int ret;
2666 long clk_rate, prev_clk_rate;
2667
2668 clk_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
2669 if (vfe_dev->vfe_clk_info[clk_idx].clk_rate == clk_rate)
2670 return rc;
2671
2672 prev_clk_rate =
2673 vfe_dev->vfe_clk_info[clk_idx].clk_rate;
2674 vfe_dev->vfe_clk_info[clk_idx].clk_rate =
2675 clk_rate;
2676	/*
2677	 * if cx_ipeak is supported, vote first so that DSP throttling is
2678	 * reduced before we go to turbo
2679	 */
2680 if ((vfe_dev->vfe_cx_ipeak) &&
2681 (vfe_dev->vfe_clk_info[clk_idx].clk_rate >=
2682 vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_NOMINAL]
2683 [vfe_dev->hw_info->vfe_clk_idx]) &&
2684 prev_clk_rate <
2685 vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_NOMINAL]
2686 [vfe_dev->hw_info->vfe_clk_idx]) {
2687 ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, true);
2688 if (ret) {
2689 pr_err("%s: cx_ipeak_update failed %d\n",
2690 __func__, ret);
2691 return ret;
2692 }
2693 }
2694	/* set vfe clock */
2695 rc = msm_camera_clk_set_rate(&vfe_dev->pdev->dev,
2696 vfe_dev->vfe_clk[clk_idx], *rate);
2697 if (rc < 0)
2698 return rc;
2699	/*
2700	 * if cx_ipeak is supported, remove the vote when dropping to a
2701	 * non-turbo clock, provided a vote was placed earlier
2702	 */
2703 if ((vfe_dev->vfe_cx_ipeak) &&
2704 (vfe_dev->vfe_clk_info[clk_idx].clk_rate <
2705 vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_NOMINAL]
2706 [vfe_dev->hw_info->vfe_clk_idx]) &&
2707 prev_clk_rate >=
2708 vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_NOMINAL]
2709 [vfe_dev->hw_info->vfe_clk_idx]) {
2710 ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, false);
2711 if (ret) {
2712 pr_err("%s: cx_ipeak_update failed %d\n",
2713 __func__, ret);
2714 return ret;
2715 }
2716 }
2717 if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
2718 vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg(vfe_dev, NULL);
2719 return 0;
2720}
2721
2722int msm_vfe47_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
2723{
2724 int clk_idx = 0;
2725 unsigned long max_value = ~0;
2726 long round_rate = 0;
2727
2728 if (!vfe_dev || !rate) {
2729 pr_err("%s:%d failed: vfe_dev %pK rate %pK\n", __func__,
2730 __LINE__, vfe_dev, rate);
2731 return -EINVAL;
2732 }
2733
2734 *rate = 0;
2735 if (!vfe_dev->hw_info) {
2736 pr_err("%s:%d failed: vfe_dev->hw_info %pK\n", __func__,
2737 __LINE__, vfe_dev->hw_info);
2738 return -EINVAL;
2739 }
2740
2741 clk_idx = vfe_dev->hw_info->vfe_clk_idx;
2742 if (clk_idx >= vfe_dev->num_clk) {
2743 pr_err("%s:%d failed: clk_idx %d max array size %zd\n",
2744 __func__, __LINE__, clk_idx,
2745 vfe_dev->num_clk);
2746 return -EINVAL;
2747 }
2748
2749 round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
2750 if (round_rate < 0) {
2751 pr_err("%s: Invalid vfe clock rate\n", __func__);
2752 return -EINVAL;
2753 }
2754
2755 *rate = round_rate;
2756 return 0;
2757}
2758
2759int msm_vfe47_get_clk_rates(struct vfe_device *vfe_dev,
2760 struct msm_isp_clk_rates *rates)
2761{
2762 struct device_node *of_node;
2763 int32_t rc = 0;
2764 uint32_t svs = 0, nominal = 0, turbo = 0;
2765
2766 if (!vfe_dev || !rates) {
2767 pr_err("%s:%d failed: vfe_dev %pK rates %pK\n", __func__,
2768 __LINE__, vfe_dev, rates);
2769 return -EINVAL;
2770 }
2771
2772 if (!vfe_dev->pdev) {
2773 pr_err("%s:%d failed: vfe_dev->pdev %pK\n", __func__,
2774 __LINE__, vfe_dev->pdev);
2775 return -EINVAL;
2776 }
2777
2778 of_node = vfe_dev->pdev->dev.of_node;
2779
2780 if (!of_node) {
2781 pr_err("%s %d failed: of_node = %pK\n", __func__,
2782 __LINE__, of_node);
2783 return -EINVAL;
2784 }
2785
2786	/*
2787	 * Many older targets don't define svs;
2788	 * return svs = 0 for those targets.
2789	 */
2790 rc = of_property_read_u32(of_node, "max-clk-svs",
2791 &svs);
2792 if (rc < 0)
2793 svs = 0;
2794
2795 rc = of_property_read_u32(of_node, "max-clk-nominal",
2796 &nominal);
2797 if (rc < 0 || !nominal) {
2798 pr_err("%s: nominal rate error\n", __func__);
2799 return -EINVAL;
2800 }
2801
2802 rc = of_property_read_u32(of_node, "max-clk-turbo",
2803 &turbo);
2804 if (rc < 0 || !turbo) {
2805 pr_err("%s: turbo rate error\n", __func__);
2806 return -EINVAL;
2807 }
2808 rates->svs_rate = svs;
2809 rates->nominal_rate = nominal;
2810 rates->high_rate = turbo;
2811 return 0;
2812}
2813
2814void msm_vfe47_put_regulators(struct vfe_device *vfe_dev)
2815{
2816 int i;
2817
2818 for (i = 0; i < vfe_dev->vfe_num_regulators; i++)
2819 regulator_put(vfe_dev->regulator_info[i].vdd);
2820
2821 vfe_dev->vfe_num_regulators = 0;
2822 kfree(vfe_dev->regulator_info);
2823 vfe_dev->regulator_info = NULL;
2824}
2825
2826int msm_vfe47_get_regulators(struct vfe_device *vfe_dev)
2827{
2828 int rc = 0;
2829 int i;
2830
2831 vfe_dev->vfe_num_regulators =
2832 sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *);
2833
2834 vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) *
2835 vfe_dev->vfe_num_regulators, GFP_KERNEL);
2836 if (!vfe_dev->regulator_info)
2837 return -ENOMEM;
2838
2839 for (i = 0; i < vfe_dev->vfe_num_regulators; i++) {
2840 vfe_dev->regulator_info[i].vdd = regulator_get(
2841 &vfe_dev->pdev->dev,
2842 vfe_dev->hw_info->regulator_names[i]);
2843 if (IS_ERR(vfe_dev->regulator_info[i].vdd)) {
2844 pr_err("%s: Regulator vfe get failed %ld\n", __func__,
2845 PTR_ERR(vfe_dev->regulator_info[i].vdd));
2846 rc = -ENODEV;
2847 goto reg_get_fail;
2848 }
2849 }
2850 return 0;
2851
2852reg_get_fail:
2853 for (i--; i >= 0; i--)
2854 regulator_put(vfe_dev->regulator_info[i].vdd);
2855 kfree(vfe_dev->regulator_info);
2856 vfe_dev->regulator_info = NULL;
2857 return rc;
2858}
2859
2860int msm_vfe47_enable_regulators(struct vfe_device *vfe_dev, int enable)
2861{
2862 return msm_camera_regulator_enable(vfe_dev->regulator_info,
2863 vfe_dev->vfe_num_regulators, enable);
2864}
2865
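/*
 * Map the VFE and VBIF register blocks, read the optional vfe_fuse region to
 * detect a hardware limit, then acquire regulators, clocks, the VFE
 * interrupt and the bandwidth manager, unwinding in reverse order on any
 * failure.
 */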
2866int msm_vfe47_get_platform_data(struct vfe_device *vfe_dev)
2867{
2868 int rc = 0;
2869 void __iomem *vfe_fuse_base;
2870 uint32_t vfe_fuse_base_size;
2871
2872 vfe_dev->vfe_base = msm_camera_get_reg_base(vfe_dev->pdev, "vfe", 0);
2873 if (!vfe_dev->vfe_base)
2874 return -ENOMEM;
2875 vfe_dev->vfe_vbif_base = msm_camera_get_reg_base(vfe_dev->pdev,
2876 "vfe_vbif", 0);
2877 if (!vfe_dev->vfe_vbif_base) {
2878 rc = -ENOMEM;
2879 goto vbif_base_fail;
2880 }
2881
2882 vfe_dev->vfe_irq = msm_camera_get_irq(vfe_dev->pdev, "vfe");
2883 if (!vfe_dev->vfe_irq) {
2884 rc = -ENODEV;
2885 goto vfe_irq_fail;
2886 }
2887
2888 vfe_dev->vfe_base_size = msm_camera_get_res_size(vfe_dev->pdev, "vfe");
2889 vfe_dev->vfe_vbif_base_size = msm_camera_get_res_size(vfe_dev->pdev,
2890 "vfe_vbif");
2891 if (!vfe_dev->vfe_base_size || !vfe_dev->vfe_vbif_base_size) {
2892 rc = -ENOMEM;
2893 goto get_res_fail;
2894 }
2895 vfe_dev->vfe_hw_limit = 0;
2896 vfe_fuse_base = msm_camera_get_reg_base(vfe_dev->pdev,
2897 "vfe_fuse", 0);
2898 vfe_fuse_base_size = msm_camera_get_res_size(vfe_dev->pdev,
2899 "vfe_fuse");
2900 if (vfe_fuse_base) {
2901 if (vfe_fuse_base_size)
2902 vfe_dev->vfe_hw_limit =
2903 (msm_camera_io_r(vfe_fuse_base) >> 5) & 0x1;
2904 msm_camera_put_reg_base(vfe_dev->pdev, vfe_fuse_base,
2905 "vfe_fuse", 0);
2906 }
2907 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_regulators(vfe_dev);
2908 if (rc)
2909 goto get_regulator_fail;
2910
2911 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clks(vfe_dev);
2912 if (rc)
2913 goto get_clkcs_fail;
2914
2915 rc = msm_camera_register_irq(vfe_dev->pdev, vfe_dev->vfe_irq,
2916 msm_isp_process_irq,
2917 IRQF_TRIGGER_RISING, "vfe", vfe_dev);
2918 if (rc < 0)
2919 goto irq_register_fail;
2920
2921 msm_camera_enable_irq(vfe_dev->vfe_irq, 0);
2922
2923 rc = msm_isp_init_bandwidth_mgr(vfe_dev, ISP_VFE0 + vfe_dev->pdev->id);
2924 if (rc)
2925 goto init_bw_fail;
2926
2927 return 0;
2928
2929init_bw_fail:
2930 msm_camera_unregister_irq(vfe_dev->pdev, vfe_dev->vfe_irq, "vfe");
2931irq_register_fail:
2932 vfe_dev->hw_info->vfe_ops.platform_ops.put_clks(vfe_dev);
2933get_clkcs_fail:
2934 vfe_dev->hw_info->vfe_ops.platform_ops.put_regulators(vfe_dev);
2935get_regulator_fail:
2936get_res_fail:
2937 vfe_dev->vfe_vbif_base_size = 0;
2938 vfe_dev->vfe_base_size = 0;
2939vfe_irq_fail:
2940	msm_camera_put_reg_base(vfe_dev->pdev, vfe_dev->vfe_vbif_base,
2941		"vfe_vbif", 0);
2942vbif_base_fail:
2943 msm_camera_put_reg_base(vfe_dev->pdev, vfe_dev->vfe_base, "vfe", 0);
2944 return rc;
2945}
2946
2947void msm_vfe47_get_error_mask(
2948 uint32_t *error_mask0, uint32_t *error_mask1)
2949{
2950 *error_mask0 = 0x00000000;
2951 *error_mask1 = 0x0BFFFEFF;
2952}
2953
2954void msm_vfe47_get_overflow_mask(uint32_t *overflow_mask)
2955{
2956 *overflow_mask = 0x09FFFE7E;
2957}
2958
2959void msm_vfe47_get_rdi_wm_mask(struct vfe_device *vfe_dev,
2960 uint32_t *rdi_wm_mask)
2961{
2962 *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
2963}
2964
2965void msm_vfe47_get_irq_mask(struct vfe_device *vfe_dev,
2966 uint32_t *irq0_mask, uint32_t *irq1_mask)
2967{
2968 *irq0_mask = vfe_dev->irq0_mask;
2969 *irq1_mask = vfe_dev->irq1_mask;
2970}
2971
2972void msm_vfe47_get_halt_restart_mask(uint32_t *irq0_mask,
2973 uint32_t *irq1_mask)
2974{
2975 *irq0_mask = BIT(31);
2976 *irq1_mask = BIT(8);
2977}
2978
2979static struct msm_vfe_axi_hardware_info msm_vfe47_axi_hw_info = {
2980 .num_wm = 7,
2981 .num_comp_mask = 3,
2982 .num_rdi = 3,
2983 .num_rdi_master = 3,
2984 .min_wm_ub = 96,
2985 .scratch_buf_range = SZ_32M + SZ_4M,
2986};
2987
2988static struct msm_vfe_stats_hardware_info msm_vfe47_stats_hw_info = {
2989 .stats_capability_mask =
2990 1 << MSM_ISP_STATS_HDR_BE | 1 << MSM_ISP_STATS_BF |
2991 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
2992 1 << MSM_ISP_STATS_HDR_BHIST | 1 << MSM_ISP_STATS_IHIST |
2993 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
2994 1 << MSM_ISP_STATS_AEC_BG,
2995 .stats_ping_pong_offset = stats_pingpong_offset_map,
2996 .num_stats_type = VFE47_NUM_STATS_TYPE,
2997 .num_stats_comp_mask = VFE47_NUM_STATS_COMP,
2998};
2999
3000struct msm_vfe_hardware_info vfe47_hw_info = {
3001 .num_iommu_ctx = 1,
3002 .num_iommu_secure_ctx = 0,
3003 .vfe_clk_idx = VFE47_SRC_CLK_DTSI_IDX,
3004 .runtime_axi_update = 1,
3005 .min_ib = 100000000,
3006 .min_ab = 100000000,
3007 .vfe_ops = {
3008 .irq_ops = {
3009 .read_and_clear_irq_status =
3010 msm_vfe47_read_and_clear_irq_status,
3011 .process_camif_irq = msm_vfe47_process_input_irq,
3012 .process_reset_irq = msm_vfe47_process_reset_irq,
3013 .process_halt_irq = msm_vfe47_process_halt_irq,
3014 .process_reg_update = msm_vfe47_process_reg_update,
3015 .process_axi_irq = msm_isp_process_axi_irq,
3016 .process_stats_irq = msm_isp_process_stats_irq,
3017 .process_epoch_irq = msm_vfe47_process_epoch_irq,
3018 .config_irq = msm_vfe47_config_irq,
3019 .read_irq_status = msm_vfe47_read_irq_status,
3020 .preprocess_camif_irq = msm_isp47_preprocess_camif_irq,
3021 },
3022 .axi_ops = {
3023 .reload_wm = msm_vfe47_axi_reload_wm,
3024 .enable_wm = msm_vfe47_axi_enable_wm,
3025 .cfg_io_format = msm_vfe47_cfg_io_format,
3026 .cfg_comp_mask = msm_vfe47_axi_cfg_comp_mask,
3027 .clear_comp_mask = msm_vfe47_axi_clear_comp_mask,
3028 .cfg_wm_irq_mask = msm_vfe47_axi_cfg_wm_irq_mask,
3029 .clear_wm_irq_mask = msm_vfe47_axi_clear_wm_irq_mask,
3030 .cfg_framedrop = msm_vfe47_cfg_framedrop,
3031 .clear_framedrop = msm_vfe47_clear_framedrop,
3032 .cfg_wm_reg = msm_vfe47_axi_cfg_wm_reg,
3033 .clear_wm_reg = msm_vfe47_axi_clear_wm_reg,
3034 .cfg_wm_xbar_reg = msm_vfe47_axi_cfg_wm_xbar_reg,
3035 .clear_wm_xbar_reg = msm_vfe47_axi_clear_wm_xbar_reg,
3036 .cfg_ub = msm_vfe47_cfg_axi_ub,
3037 .read_wm_ping_pong_addr =
3038 msm_vfe47_read_wm_ping_pong_addr,
3039 .update_ping_pong_addr =
3040 msm_vfe47_update_ping_pong_addr,
3041 .get_comp_mask = msm_vfe47_get_comp_mask,
3042 .get_wm_mask = msm_vfe47_get_wm_mask,
3043 .get_pingpong_status = msm_vfe47_get_pingpong_status,
3044 .halt = msm_vfe47_axi_halt,
3045 .restart = msm_vfe47_axi_restart,
3046 .update_cgc_override =
3047 msm_vfe47_axi_update_cgc_override,
3048 .ub_reg_offset = msm_vfe47_ub_reg_offset,
3049 .get_ub_size = msm_vfe47_get_ub_size,
3050 },
3051 .core_ops = {
3052 .reg_update = msm_vfe47_reg_update,
3053 .cfg_input_mux = msm_vfe47_cfg_input_mux,
3054 .update_camif_state = msm_vfe47_update_camif_state,
3055 .start_fetch_eng = msm_vfe47_start_fetch_engine,
3056 .cfg_rdi_reg = msm_vfe47_cfg_rdi_reg,
3057 .reset_hw = msm_vfe47_reset_hardware,
3058 .init_hw = msm_vfe47_init_hardware,
3059 .init_hw_reg = msm_vfe47_init_hardware_reg,
3060 .clear_status_reg = msm_vfe47_clear_status_reg,
3061 .release_hw = msm_vfe47_release_hardware,
3062 .get_error_mask = msm_vfe47_get_error_mask,
3063 .get_overflow_mask = msm_vfe47_get_overflow_mask,
3064 .get_rdi_wm_mask = msm_vfe47_get_rdi_wm_mask,
3065 .get_irq_mask = msm_vfe47_get_irq_mask,
3066 .get_halt_restart_mask =
3067 msm_vfe47_get_halt_restart_mask,
3068 .process_error_status = msm_vfe47_process_error_status,
3069 .is_module_cfg_lock_needed =
3070 msm_vfe47_is_module_cfg_lock_needed,
3071 .ahb_clk_cfg = msm_isp47_ahb_clk_cfg,
3072 .start_fetch_eng_multi_pass =
3073 msm_vfe47_start_fetch_engine_multi_pass,
3074 .set_halt_restart_mask =
3075 msm_vfe47_set_halt_restart_mask,
3076 .set_bus_err_ign_mask = NULL,
3077 .get_bus_err_mask = NULL,
3078 },
3079 .stats_ops = {
3080 .get_stats_idx = msm_vfe47_get_stats_idx,
3081 .check_streams = msm_vfe47_stats_check_streams,
3082 .cfg_comp_mask = msm_vfe47_stats_cfg_comp_mask,
3083 .cfg_wm_irq_mask = msm_vfe47_stats_cfg_wm_irq_mask,
3084 .clear_wm_irq_mask = msm_vfe47_stats_clear_wm_irq_mask,
3085 .cfg_wm_reg = msm_vfe47_stats_cfg_wm_reg,
3086 .clear_wm_reg = msm_vfe47_stats_clear_wm_reg,
3087 .cfg_ub = msm_vfe47_stats_cfg_ub,
3088 .enable_module = msm_vfe47_stats_enable_module,
3089 .update_ping_pong_addr =
3090 msm_vfe47_stats_update_ping_pong_addr,
3091 .get_comp_mask = msm_vfe47_stats_get_comp_mask,
3092 .get_wm_mask = msm_vfe47_stats_get_wm_mask,
3093 .get_frame_id = msm_vfe47_stats_get_frame_id,
3094 .get_pingpong_status = msm_vfe47_get_pingpong_status,
3095 .update_cgc_override =
3096 msm_vfe47_stats_update_cgc_override,
3097 .enable_stats_wm = NULL,
3098 },
3099 .platform_ops = {
3100 .get_platform_data = msm_vfe47_get_platform_data,
3101 .enable_regulators = msm_vfe47_enable_regulators,
3102 .get_regulators = msm_vfe47_get_regulators,
3103 .put_regulators = msm_vfe47_put_regulators,
3104 .enable_clks = msm_vfe47_enable_clks,
3105 .get_clks = msm_vfe47_get_clks,
3106 .put_clks = msm_vfe47_put_clks,
3107 .get_clk_rates = msm_vfe47_get_clk_rates,
3108 .get_max_clk_rate = msm_vfe47_get_max_clk_rate,
3109 .set_clk_rate = msm_vfe47_set_clk_rate,
3110 .init_bw_mgr = msm_vfe47_init_bandwidth_mgr,
3111 .deinit_bw_mgr = msm_vfe47_deinit_bandwidth_mgr,
3112 .update_bw = msm_vfe47_update_bandwidth,
3113 }
3114 },
3115 .dmi_reg_offset = 0xC2C,
3116 .axi_hw_info = &msm_vfe47_axi_hw_info,
3117 .stats_hw_info = &msm_vfe47_stats_hw_info,
3118 .regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"},
3119};
3120EXPORT_SYMBOL(vfe47_hw_info);
3121
3122static const struct of_device_id msm_vfe47_dt_match[] = {
3123 {
3124 .compatible = "qcom,vfe47",
3125 .data = &vfe47_hw_info,
3126 },
3127 {}
3128};
3129
3130MODULE_DEVICE_TABLE(of, msm_vfe47_dt_match);
3131
3132static struct platform_driver vfe47_driver = {
3133 .probe = vfe_hw_probe,
3134 .driver = {
3135 .name = "msm_vfe47",
3136 .owner = THIS_MODULE,
3137 .of_match_table = msm_vfe47_dt_match,
3138 },
3139};
3140
3141static int __init msm_vfe47_init_module(void)
3142{
3143 return platform_driver_register(&vfe47_driver);
3144}
3145
3146static void __exit msm_vfe47_exit_module(void)
3147{
3148 platform_driver_unregister(&vfe47_driver);
3149}
3150
3151module_init(msm_vfe47_init_module);
3152module_exit(msm_vfe47_exit_module);
3153MODULE_DESCRIPTION("MSM VFE47 driver");
3154MODULE_LICENSE("GPL v2");