blob: 51c8e4af4fdc7ba4a70aa4ab4dbe294620b22b3a [file] [log] [blame]
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -07001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "cam_fd_hw_core.h"
14#include "cam_fd_hw_soc.h"
Junzhe Zou5fa08b12017-08-15 10:08:12 -070015#include "cam_trace.h"
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -070016
17#define CAM_FD_REG_VAL_PAIR_SIZE 256
18
19static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer,
20 uint32_t index, uint32_t reg_offset, uint32_t reg_value)
21{
22 buffer[index++] = reg_offset;
23 buffer[index++] = reg_value;
24
25 CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%8x] Value[0x%8x]",
26 reg_offset, reg_value);
27
28 return index;
29}
30
/*
 * CDM completion callback registered for FD BL submissions.
 *
 * Only traces and logs the completion; request bookkeeping happens in
 * the HW manager layer.
 *
 * @handle: CDM handle on which the callback fired
 * @userdata: private data registered with CDM (logged only)
 * @status: CDM callback status
 * @cookie: per-submission cookie supplied at BL commit
 */
static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
	enum cam_cdm_cb_status status, uint32_t cookie)
{
	trace_cam_cdm_cb("FD", status);
	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
		handle, userdata, status, cookie);
}
38
39static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw)
40{
41 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
42 struct cam_fd_hw_static_info *hw_static_info =
43 ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
44
45 if (hw_static_info->enable_errata_wa.single_irq_only == false) {
46 /* Enable IRQs here */
47 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
48 hw_static_info->wrapper_regs.irq_mask,
49 hw_static_info->irq_mask);
50 }
51
52 /* QoS settings */
53 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
54 hw_static_info->wrapper_regs.vbif_req_priority,
55 hw_static_info->qos_priority);
56 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
57 hw_static_info->wrapper_regs.vbif_priority_level,
58 hw_static_info->qos_priority_level);
59}
60
61int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
62 struct cam_fd_hw_caps *hw_caps)
63{
64 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
65 struct cam_fd_hw_static_info *hw_static_info =
66 ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
67 uint32_t reg_value;
68
69 if (!hw_static_info) {
70 CAM_ERR(CAM_FD, "Invalid hw info data");
71 return -EINVAL;
72 }
73
74 reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE,
75 hw_static_info->core_regs.version);
76 hw_caps->core_version.major =
77 CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8);
78 hw_caps->core_version.minor =
79 CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4);
80 hw_caps->core_version.incr =
81 CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0);
82
83 reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
84 hw_static_info->wrapper_regs.wrapper_version);
85 hw_caps->wrapper_version.major =
86 CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
87 hw_caps->wrapper_version.minor =
88 CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
89 hw_caps->wrapper_version.incr =
90 CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
91
92 hw_caps->raw_results_available =
93 hw_static_info->results.raw_results_available;
94 hw_caps->supported_modes = hw_static_info->supported_modes;
95
96 CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d intermediate:%d",
97 hw_caps->core_version.major, hw_caps->core_version.minor,
98 hw_caps->core_version.incr, hw_caps->wrapper_version.major,
99 hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr,
100 hw_caps->raw_results_available);
101
102 return 0;
103}
104
105static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw)
106{
107 struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
108 struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
109 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
110 long time_left;
111
112 /* Before triggering reset to HW, clear the reset complete */
113 reinit_completion(&fd_core->reset_complete);
114
Gautham Mayyuri635ef342017-10-06 23:26:04 -0700115 cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
116 hw_static_info->core_regs.control, 0x1);
117
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700118 if (hw_static_info->enable_errata_wa.single_irq_only) {
119 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
120 hw_static_info->wrapper_regs.irq_mask,
121 CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE));
122 }
123
124 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
125 hw_static_info->wrapper_regs.sw_reset, 0x1);
126
127 time_left = wait_for_completion_timeout(&fd_core->reset_complete,
128 msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
Pavan Kumar Chilamkurthi09751412017-09-18 18:16:06 -0700129 if (time_left <= 0)
130 CAM_WARN(CAM_FD, "HW reset timeout time_left=%d", time_left);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700131
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700132 CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
133
134 return 0;
135}
136
137
138static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw)
139{
140 struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
141 struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
142 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
143 long time_left;
144
145 /* Before triggering halt to HW, clear halt complete */
146 reinit_completion(&fd_core->halt_complete);
147
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700148 if (hw_static_info->enable_errata_wa.single_irq_only) {
149 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
150 hw_static_info->wrapper_regs.irq_mask,
151 CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE));
152 }
153
154 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
155 hw_static_info->wrapper_regs.hw_stop, 0x1);
156
157 time_left = wait_for_completion_timeout(&fd_core->halt_complete,
158 msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
Pavan Kumar Chilamkurthi09751412017-09-18 18:16:06 -0700159 if (time_left <= 0)
160 CAM_WARN(CAM_FD, "HW halt timeout time_left=%d", time_left);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700161
162 CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
163
164 return 0;
165}
166
167static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw,
168 struct cam_fd_hw_cmd_prestart_args *prestart_args)
169{
170 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
171 struct cam_fd_hw_static_info *hw_static_info =
172 ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
173 struct cam_fd_ctx_hw_private *ctx_hw_private =
174 prestart_args->ctx_hw_private;
175 uint32_t size, size_required = 0;
176 uint32_t mem_base;
177 uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr;
178 uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE];
179 uint32_t num_cmds = 0;
180 int i;
181 struct cam_fd_hw_io_buffer *io_buf;
182 struct cam_fd_hw_req_private *req_private;
183 uint32_t available_size = prestart_args->size;
184 bool work_buffer_configured = false;
185
186 if (!ctx_hw_private || !cmd_buf_addr) {
187 CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK",
188 ctx_hw_private, cmd_buf_addr);
189 return -EINVAL;
190 }
191
192 if (prestart_args->get_raw_results &&
193 !hw_static_info->results.raw_results_available) {
194 CAM_ERR(CAM_FD, "Raw results not supported %d %d",
195 prestart_args->get_raw_results,
196 hw_static_info->results.raw_results_available);
197 return -EINVAL;
198 }
199
200 req_private = &prestart_args->hw_req_private;
201 req_private->ctx_hw_private = prestart_args->ctx_hw_private;
202 req_private->request_id = prestart_args->request_id;
203 req_private->get_raw_results = prestart_args->get_raw_results;
204 req_private->fd_results = NULL;
205 req_private->raw_results = NULL;
206
207 /* Start preparing CDM register values that KMD has to insert */
208 num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
209 hw_static_info->core_regs.control, 0x1);
210 num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
211 hw_static_info->core_regs.control, 0x0);
212
213 for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
214 io_buf = &prestart_args->input_buf[i];
215
216 if (io_buf->valid == false)
217 break;
218
219 if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
220 CAM_ERR(CAM_FD, "Incorrect direction %d %d",
221 io_buf->io_cfg->direction, CAM_BUF_INPUT);
222 return -EINVAL;
223 }
224
225 switch (io_buf->io_cfg->resource_type) {
226 case CAM_FD_INPUT_PORT_ID_IMAGE: {
227 if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
228 CAM_ERR(CAM_FD,
229 "Invalid reg_val pair size %d, %d",
230 num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
231 return -EINVAL;
232 }
233
234 num_cmds = cam_fd_cdm_write_reg_val_pair(
235 reg_val_pair, num_cmds,
236 hw_static_info->core_regs.image_addr,
237 io_buf->io_addr[0]);
238 break;
239 }
240 default:
241 CAM_ERR(CAM_FD, "Invalid resource type %d",
242 io_buf->io_cfg->resource_type);
243 return -EINVAL;
244 }
245 }
246
247 for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
248 io_buf = &prestart_args->output_buf[i];
249
250 if (io_buf->valid == false)
251 break;
252
253 if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
254 CAM_ERR(CAM_FD, "Incorrect direction %d %d",
255 io_buf->io_cfg->direction, CAM_BUF_INPUT);
256 return -EINVAL;
257 }
258
259 switch (io_buf->io_cfg->resource_type) {
260 case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
261 uint32_t face_results_offset;
262
263 size_required = hw_static_info->results.max_faces *
264 hw_static_info->results.per_face_entries * 4;
265
266 if (io_buf->io_cfg->planes[0].plane_stride <
267 size_required) {
268 CAM_ERR(CAM_FD, "Invalid results size %d %d",
269 io_buf->io_cfg->planes[0].plane_stride,
270 size_required);
271 return -EINVAL;
272 }
273
274 req_private->fd_results =
275 (struct cam_fd_results *)io_buf->cpu_addr[0];
276
277 face_results_offset =
278 (uint8_t *)&req_private->fd_results->faces[0] -
279 (uint8_t *)req_private->fd_results;
280
281 if (hw_static_info->ro_mode_supported) {
282 if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
283 CAM_ERR(CAM_FD,
284 "Invalid reg_val size %d, %d",
285 num_cmds,
286 CAM_FD_REG_VAL_PAIR_SIZE);
287 return -EINVAL;
288 }
289 /*
290 * Face data actually starts 16bytes later in
291 * the io buffer Check cam_fd_results.
292 */
293 num_cmds = cam_fd_cdm_write_reg_val_pair(
294 reg_val_pair, num_cmds,
295 hw_static_info->core_regs.result_addr,
296 io_buf->io_addr[0] +
297 face_results_offset);
298 num_cmds = cam_fd_cdm_write_reg_val_pair(
299 reg_val_pair, num_cmds,
300 hw_static_info->core_regs.ro_mode,
301 0x1);
302
303 req_private->ro_mode_enabled = true;
304 } else {
305 req_private->ro_mode_enabled = false;
306 }
307 break;
308 }
309 case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: {
310 size_required =
311 hw_static_info->results.raw_results_entries *
312 sizeof(uint32_t);
313
314 if (io_buf->io_cfg->planes[0].plane_stride <
315 size_required) {
316 CAM_ERR(CAM_FD, "Invalid results size %d %d",
317 io_buf->io_cfg->planes[0].plane_stride,
318 size_required);
319 return -EINVAL;
320 }
321
322 req_private->raw_results =
323 (uint32_t *)io_buf->cpu_addr[0];
324 break;
325 }
326 case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: {
327 if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
328 CAM_ERR(CAM_FD,
329 "Invalid reg_val pair size %d, %d",
330 num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
331 return -EINVAL;
332 }
333
334 num_cmds = cam_fd_cdm_write_reg_val_pair(
335 reg_val_pair, num_cmds,
336 hw_static_info->core_regs.work_addr,
337 io_buf->io_addr[0]);
338
339 work_buffer_configured = true;
340 break;
341 }
342 default:
343 CAM_ERR(CAM_FD, "Invalid resource type %d",
344 io_buf->io_cfg->resource_type);
345 return -EINVAL;
346 }
347 }
348
349 if (!req_private->fd_results || !work_buffer_configured) {
350 CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d",
351 req_private->fd_results, work_buffer_configured);
352 return -EINVAL;
353 }
354
355 /* First insert CHANGE_BASE command */
356 size = ctx_hw_private->cdm_ops->cdm_required_size_changebase();
357 /* since cdm returns dwords, we need to convert it into bytes */
358 if ((size * 4) > available_size) {
359 CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d",
360 prestart_args->size, size);
361 return -EINVAL;
362 }
363
364 mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info,
365 ((struct cam_fd_soc_private *)soc_info->soc_private)->
366 regbase_index[CAM_FD_REG_CORE]);
367
368 ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
369 cmd_buf_addr += size;
370 available_size -= (size * 4);
371
372 size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
373 num_cmds/2);
374 /* cdm util returns dwords, need to convert to bytes */
375 if ((size * 4) > available_size) {
376 CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
377 available_size, size);
378 return -ENOMEM;
379 }
380 ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
381 reg_val_pair);
382 cmd_buf_addr += size;
383 available_size -= (size * 4);
384
385 /* Update pre_config_buf_size in bytes */
386 prestart_args->pre_config_buf_size =
387 prestart_args->size - available_size;
388
Junzhe Zou256437ab2017-08-07 17:20:45 -0700389 /* Insert start trigger command into CDM as post config commands. */
390 num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, 0,
391 hw_static_info->core_regs.control, 0x2);
392 size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
393 num_cmds/2);
394 if ((size * 4) > available_size) {
395 CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
396 available_size, size);
397 return -ENOMEM;
398 }
399 ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
400 reg_val_pair);
401 cmd_buf_addr += size;
402 available_size -= (size * 4);
403
404 prestart_args->post_config_buf_size = size * 4;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700405
406 CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
407 prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
408 cmd_buf_addr, prestart_args->post_config_buf_size);
409
410 for (i = 0; i < (prestart_args->pre_config_buf_size +
411 prestart_args->post_config_buf_size) / 4; i++)
412 CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i,
413 &prestart_args->cmd_buf_addr[i],
414 prestart_args->cmd_buf_addr[i]);
415
416 return 0;
417}
418
/*
 * Read out frame results after a FRAME_DONE interrupt.
 *
 * Transitions the core from IDLE to READING_RESULTS under the spin
 * lock, copies the face count (and, when needed, per-face data and raw
 * results) from FD core registers into the request's output buffers,
 * then clears hw_req_private and returns the core to IDLE.
 *
 * Return: 0 on success, -EINVAL when the core is not in a state where
 * valid results can be read.
 */
static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
	struct cam_fd_hw_frame_done_args *frame_done_args)
{
	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
	struct cam_fd_hw_req_private *req_private;
	uint32_t base, face_cnt;
	uint32_t *buffer;
	unsigned long flags;
	int i;

	/* Validate and claim the results under the lock */
	spin_lock_irqsave(&fd_core->spin_lock, flags);
	if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) ||
		(fd_core->results_valid == false) ||
		!fd_core->hw_req_private) {
		CAM_ERR(CAM_FD,
			"Invalid state for results state=%d, results=%d %pK",
			fd_core->core_state, fd_core->results_valid,
			fd_core->hw_req_private);
		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
		return -EINVAL;
	}
	fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
	req_private = fd_core->hw_req_private;
	spin_unlock_irqrestore(&fd_core->spin_lock, flags);

	/*
	 * Copy the register value as is into output buffers.
	 * Wehter we are copying the output data by reading registers or
	 * programming output buffer directly to HW must be transparent to UMD.
	 * In case HW supports writing face count value directly into
	 * DDR memory in future, these values should match.
	 */
	req_private->fd_results->face_count =
		cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
			hw_static_info->core_regs.result_cnt);

	/* Only the low 6 bits of the count register hold the face count */
	face_cnt = req_private->fd_results->face_count & 0x3F;

	if (face_cnt > hw_static_info->results.max_faces) {
		CAM_WARN(CAM_FD, "Face count greater than max %d %d",
			face_cnt, hw_static_info->results.max_faces);
		face_cnt = hw_static_info->results.max_faces;
	}

	CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
		req_private->request_id, face_cnt);

	/*
	 * We need to read the face data information from registers only
	 * if one of below is true
	 * 1. RO mode is not set. i.e FD HW doesn't write face data into
	 *    DDR memory
	 * 2. On the current chipset, results written into DDR memory by FD HW
	 *    are not gauranteed to be correct
	 */
	if (!req_private->ro_mode_enabled ||
		hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
		buffer = (uint32_t *)&req_private->fd_results->faces[0];
		base = hw_static_info->core_regs.results_reg_base;

		/*
		 * Write register values as is into face data buffer. Its UMD
		 * driver responsibility to interpret the data and extract face
		 * properties from output buffer. Think in case output buffer
		 * is directly programmed to HW, then KMD has no control to
		 * extract the face properties and UMD anyway has to extract
		 * face properties. So we follow the same approach and keep
		 * this transparent to UMD.
		 */
		for (i = 0;
			i < (face_cnt *
			hw_static_info->results.per_face_entries); i++) {
			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
				CAM_FD_REG_CORE, base + (i * 0x4));
			CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer);
			buffer++;
		}
	}

	/* Raw results are optional and only read when requested + supported */
	if (req_private->get_raw_results &&
		req_private->raw_results &&
		hw_static_info->results.raw_results_available) {
		buffer = req_private->raw_results;
		base = hw_static_info->core_regs.raw_results_reg_base;

		for (i = 0;
			i < hw_static_info->results.raw_results_entries;
			i++) {
			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
				CAM_FD_REG_CORE, base + (i * 0x4));
			CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer);
			buffer++;
		}
	}

	/* Request fully consumed; release it and return the core to IDLE */
	spin_lock_irqsave(&fd_core->spin_lock, flags);
	fd_core->hw_req_private = NULL;
	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
	spin_unlock_irqrestore(&fd_core->spin_lock, flags);

	return 0;
}
522
523irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
524{
525 struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
526 struct cam_fd_core *fd_core;
527 struct cam_hw_soc_info *soc_info;
528 struct cam_fd_hw_static_info *hw_static_info;
529 uint32_t reg_value;
530 enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE;
531 uint32_t num_irqs = 0;
532
533 if (!fd_hw) {
534 CAM_ERR(CAM_FD, "Invalid data in IRQ callback");
535 return -EINVAL;
536 }
537
538 fd_core = (struct cam_fd_core *) fd_hw->core_info;
539 soc_info = &fd_hw->soc_info;
540 hw_static_info = fd_core->hw_static_info;
541
542 reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
543 hw_static_info->wrapper_regs.irq_status);
544
545 CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value);
546
547 if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) {
548 complete_all(&fd_core->halt_complete);
549 irq_type = CAM_FD_IRQ_HALT_DONE;
550 num_irqs++;
551 }
552
553 if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) {
554 complete_all(&fd_core->reset_complete);
555 irq_type = CAM_FD_IRQ_RESET_DONE;
556 num_irqs++;
557 }
558
559 if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) {
560 complete_all(&fd_core->processing_complete);
561 irq_type = CAM_FD_IRQ_FRAME_DONE;
562 num_irqs++;
563 }
564
565 /*
566 * We should never get an IRQ callback with no or more than one mask.
567 * Validate first to make sure nothing going wrong.
568 */
569 if (num_irqs != 1) {
570 CAM_ERR(CAM_FD,
571 "Invalid number of IRQs, value=0x%x, num_irqs=%d",
572 reg_value, num_irqs);
573 return -EINVAL;
574 }
575
Junzhe Zou5fa08b12017-08-15 10:08:12 -0700576 trace_cam_irq_activated("FD", irq_type);
577
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700578 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
579 hw_static_info->wrapper_regs.irq_clear,
580 hw_static_info->irq_mask);
581
582 if (irq_type == CAM_FD_IRQ_HALT_DONE) {
583 /*
584 * Do not send HALT IRQ callback to Hw Mgr,
585 * a reset would always follow
586 */
587 return IRQ_HANDLED;
588 }
589
590 spin_lock(&fd_core->spin_lock);
591 /* Do not change state to IDLE on HALT IRQ. Reset must follow halt */
592 if ((irq_type == CAM_FD_IRQ_RESET_DONE) ||
593 (irq_type == CAM_FD_IRQ_FRAME_DONE)) {
594
595 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
596 if (irq_type == CAM_FD_IRQ_FRAME_DONE)
597 fd_core->results_valid = true;
598
599 CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d",
600 irq_type, fd_core->core_state);
601 }
602 spin_unlock(&fd_core->spin_lock);
603
604 if (fd_core->irq_cb.cam_fd_hw_mgr_cb)
605 fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data,
606 irq_type);
607
608 return IRQ_HANDLED;
609}
610
611int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
612 uint32_t arg_size)
613{
614 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
615 struct cam_fd_core *fd_core;
616 struct cam_fd_hw_caps *fd_hw_caps =
617 (struct cam_fd_hw_caps *)get_hw_cap_args;
618
619 if (!hw_priv || !get_hw_cap_args) {
620 CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK",
621 hw_priv, get_hw_cap_args);
622 return -EINVAL;
623 }
624
625 fd_core = (struct cam_fd_core *)fd_hw->core_info;
626 *fd_hw_caps = fd_core->hw_caps;
627
628 CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d",
629 fd_hw_caps->core_version.major,
630 fd_hw_caps->core_version.minor,
631 fd_hw_caps->wrapper_version.major,
632 fd_hw_caps->wrapper_version.minor,
633 fd_hw_caps->supported_modes,
634 fd_hw_caps->raw_results_available);
635
636 return 0;
637}
638
639int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
640{
641 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
642 struct cam_fd_core *fd_core;
643 struct cam_fd_hw_init_args *init_args =
644 (struct cam_fd_hw_init_args *)init_hw_args;
645 int rc = 0;
Harsh Shah69afdf82017-11-07 05:11:03 -0800646 unsigned long flags;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700647
648 if (!fd_hw || !init_args) {
649 CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
650 return -EINVAL;
651 }
652
653 if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
654 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
655 sizeof(struct cam_fd_hw_init_args));
656 return -EINVAL;
657 }
658
659 fd_core = (struct cam_fd_core *)fd_hw->core_info;
660
661 mutex_lock(&fd_hw->hw_mutex);
662 CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count);
663
664 if (fd_hw->open_count > 0) {
665 rc = 0;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700666 goto cdm_streamon;
667 }
668
669 rc = cam_fd_soc_enable_resources(&fd_hw->soc_info);
670 if (rc) {
671 CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc);
672 goto unlock_return;
673 }
674
Harsh Shah69afdf82017-11-07 05:11:03 -0800675 spin_lock_irqsave(&fd_core->spin_lock, flags);
676 fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
677 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
678 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
679
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700680 rc = cam_fd_hw_reset(hw_priv, NULL, 0);
681 if (rc) {
682 CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
683 goto disable_soc;
684 }
685
686 cam_fd_hw_util_enable_power_on_settings(fd_hw);
687
Harsh Shah266ca562017-10-03 16:09:41 -0700688cdm_streamon:
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700689 fd_hw->open_count++;
690 CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
691
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700692 if (init_args->ctx_hw_private) {
693 struct cam_fd_ctx_hw_private *ctx_hw_private =
694 init_args->ctx_hw_private;
695
696 rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle);
697 if (rc) {
698 CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
699 ctx_hw_private->cdm_handle, rc);
Harsh Shah33fc2eb2017-11-06 04:32:30 -0800700 fd_hw->open_count--;
701 if (!fd_hw->open_count)
702 goto disable_soc;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700703 }
704 }
705
Harsh Shah33fc2eb2017-11-06 04:32:30 -0800706 mutex_unlock(&fd_hw->hw_mutex);
707
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700708 return rc;
709
710disable_soc:
711 if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
712 CAM_ERR(CAM_FD, "Error in disable soc resources");
Harsh Shah69afdf82017-11-07 05:11:03 -0800713
714 spin_lock_irqsave(&fd_core->spin_lock, flags);
715 fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
716 fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
717 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700718unlock_return:
719 mutex_unlock(&fd_hw->hw_mutex);
720 return rc;
721}
722
723int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
724{
725 struct cam_hw_info *fd_hw = hw_priv;
Harsh Shah266ca562017-10-03 16:09:41 -0700726 struct cam_fd_core *fd_core = NULL;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700727 struct cam_fd_hw_deinit_args *deinit_args =
728 (struct cam_fd_hw_deinit_args *)deinit_hw_args;
729 int rc = 0;
Harsh Shah69afdf82017-11-07 05:11:03 -0800730 unsigned long flags;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700731
732 if (!fd_hw || !deinit_hw_args) {
733 CAM_ERR(CAM_FD, "Invalid argument");
734 return -EINVAL;
735 }
736
737 if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
738 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
739 sizeof(struct cam_fd_hw_deinit_args));
740 return -EINVAL;
741 }
742
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700743 mutex_lock(&fd_hw->hw_mutex);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700744 if (fd_hw->open_count == 0) {
745 mutex_unlock(&fd_hw->hw_mutex);
746 CAM_ERR(CAM_FD, "Error Unbalanced deinit");
747 return -EFAULT;
748 }
749
750 fd_hw->open_count--;
751 CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
752
Harsh Shah266ca562017-10-03 16:09:41 -0700753 if (fd_hw->open_count > 0) {
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700754 rc = 0;
Harsh Shah266ca562017-10-03 16:09:41 -0700755 goto positive_ref_cnt;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700756 }
757
758 rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
759 if (rc)
760 CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
761
762 fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
Harsh Shah266ca562017-10-03 16:09:41 -0700763 fd_core = (struct cam_fd_core *)fd_hw->core_info;
764
765 /* With the ref_cnt correct, this should never happen */
766 WARN_ON(!fd_core);
767
Harsh Shah69afdf82017-11-07 05:11:03 -0800768 spin_lock_irqsave(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700769 fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
Harsh Shah69afdf82017-11-07 05:11:03 -0800770 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Harsh Shah266ca562017-10-03 16:09:41 -0700771positive_ref_cnt:
772 if (deinit_args->ctx_hw_private) {
773 struct cam_fd_ctx_hw_private *ctx_hw_private =
774 deinit_args->ctx_hw_private;
775
776 rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
777 if (rc) {
778 CAM_ERR(CAM_FD,
779 "Failed in CDM StreamOff, handle=0x%x, rc=%d",
780 ctx_hw_private->cdm_handle, rc);
781 }
782 }
783
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700784 mutex_unlock(&fd_hw->hw_mutex);
785 return rc;
786}
787
/*
 * HW-layer op: halt and reset the FD HW.
 *
 * Moves the core to RESET_PROGRESS under the spin lock (rejecting
 * resets while powered down or while another reset is in progress),
 * disables clock gating, performs halt followed by a synchronous
 * wrapper reset, re-enables clock gating and returns the core to IDLE.
 *
 * @hw_priv: struct cam_hw_info pointer
 * @reset_core_args: unused
 * @arg_size: unused
 *
 * Return: 0 on success, -EINVAL on bad handle or disallowed state,
 * or the halt/reset error code.
 */
int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
{
	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
	struct cam_fd_core *fd_core;
	struct cam_fd_hw_static_info *hw_static_info;
	struct cam_hw_soc_info *soc_info;
	unsigned long flags;
	int rc;

	if (!fd_hw) {
		CAM_ERR(CAM_FD, "Invalid input handle");
		return -EINVAL;
	}

	fd_core = (struct cam_fd_core *)fd_hw->core_info;
	hw_static_info = fd_core->hw_static_info;
	soc_info = &fd_hw->soc_info;

	/* Claim the reset: invalidate results, move to RESET_PROGRESS */
	spin_lock_irqsave(&fd_core->spin_lock, flags);
	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
			fd_core->core_state);
		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
		return -EINVAL;
	}

	fd_core->results_valid = false;
	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
	spin_unlock_irqrestore(&fd_core->spin_lock, flags);

	/* Disable clock gating for the duration of halt + reset */
	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
		hw_static_info->wrapper_regs.cgc_disable, 0x1);

	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
		return rc;
	}

	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
		return rc;
	}

	/* Re-enable clock gating */
	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
		hw_static_info->wrapper_regs.cgc_disable, 0x0);

	spin_lock_irqsave(&fd_core->spin_lock, flags);
	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
	spin_unlock_irqrestore(&fd_core->spin_lock, flags);

	return rc;
}
843
844int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
845{
846 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
847 struct cam_fd_core *fd_core;
848 struct cam_fd_hw_static_info *hw_static_info;
849 struct cam_fd_hw_cmd_start_args *start_args =
850 (struct cam_fd_hw_cmd_start_args *)hw_start_args;
851 struct cam_fd_ctx_hw_private *ctx_hw_private;
Harsh Shah97fd0712017-10-09 15:50:46 -0700852 unsigned long flags;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700853 int rc;
854
855 if (!hw_priv || !start_args) {
856 CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv,
857 start_args);
858 return -EINVAL;
859 }
860
861 if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
862 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
863 sizeof(struct cam_fd_hw_cmd_start_args));
864 return -EINVAL;
865 }
866
867 fd_core = (struct cam_fd_core *)fd_hw->core_info;
868 hw_static_info = fd_core->hw_static_info;
869
Harsh Shah97fd0712017-10-09 15:50:46 -0700870 spin_lock_irqsave(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700871 if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) {
872 CAM_ERR(CAM_FD, "Cannot start in %d state",
873 fd_core->core_state);
Harsh Shah97fd0712017-10-09 15:50:46 -0700874 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700875 return -EINVAL;
876 }
877
878 /*
879 * We are about to start FD HW processing, save the request
880 * private data which is being processed by HW. Once the frame
881 * processing is finished, process_cmd(FRAME_DONE) should be called
882 * with same hw_req_private as input.
883 */
884 fd_core->hw_req_private = start_args->hw_req_private;
885 fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING;
886 fd_core->results_valid = false;
Harsh Shah97fd0712017-10-09 15:50:46 -0700887 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700888
889 ctx_hw_private = start_args->ctx_hw_private;
890
891 /* Before starting HW process, clear processing complete */
892 reinit_completion(&fd_core->processing_complete);
893
894 if (hw_static_info->enable_errata_wa.single_irq_only) {
895 cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER,
896 hw_static_info->wrapper_regs.irq_mask,
897 CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE));
898 }
899
900 if (start_args->num_hw_update_entries > 0) {
901 struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd;
902 struct cam_hw_update_entry *cmd;
903 int i;
904
905 cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries;
906 cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
907 cdm_cmd->flag = false;
908 cdm_cmd->userdata = NULL;
909 cdm_cmd->cookie = 0;
910
911 for (i = 0 ; i <= start_args->num_hw_update_entries; i++) {
912 cmd = (start_args->hw_update_entries + i);
913 cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
914 cdm_cmd->cmd[i].offset = cmd->offset;
915 cdm_cmd->cmd[i].len = cmd->len;
916 }
917
918 rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd);
919 if (rc) {
920 CAM_ERR(CAM_FD,
921 "Failed to submit cdm commands, rc=%d", rc);
922 goto error;
923 }
924 } else {
925 CAM_ERR(CAM_FD, "Invalid number of hw update entries");
926 rc = -EINVAL;
927 goto error;
928 }
929
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700930 return 0;
931error:
Harsh Shah97fd0712017-10-09 15:50:46 -0700932 spin_lock_irqsave(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700933 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
Harsh Shah97fd0712017-10-09 15:50:46 -0700934 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700935
936 return rc;
937}
938
939int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size)
940{
941 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
942 struct cam_fd_core *fd_core;
Gautham Mayyuri635ef342017-10-06 23:26:04 -0700943 struct cam_fd_hw_static_info *hw_static_info;
944 struct cam_hw_soc_info *soc_info;
Harsh Shah97fd0712017-10-09 15:50:46 -0700945 unsigned long flags;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700946 int rc;
947
948 if (!fd_hw) {
949 CAM_ERR(CAM_FD, "Invalid input handle");
950 return -EINVAL;
951 }
952
953 fd_core = (struct cam_fd_core *)fd_hw->core_info;
Gautham Mayyuri635ef342017-10-06 23:26:04 -0700954 hw_static_info = fd_core->hw_static_info;
955 soc_info = &fd_hw->soc_info;
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700956
Harsh Shah97fd0712017-10-09 15:50:46 -0700957 spin_lock_irqsave(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700958 if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
959 (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
960 CAM_ERR(CAM_FD, "Reset not allowed in %d state",
961 fd_core->core_state);
Harsh Shah97fd0712017-10-09 15:50:46 -0700962 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700963 return -EINVAL;
964 }
965
966 fd_core->results_valid = false;
967 fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
Harsh Shah97fd0712017-10-09 15:50:46 -0700968 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700969
Gautham Mayyuri635ef342017-10-06 23:26:04 -0700970 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
971 hw_static_info->wrapper_regs.cgc_disable, 0x1);
972
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700973 rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
974 if (rc) {
975 CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
976 return rc;
977 }
978
979 /* HALT must be followed by RESET */
980 rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
981 if (rc) {
982 CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
983 return rc;
984 }
985
Gautham Mayyuri635ef342017-10-06 23:26:04 -0700986 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
987 hw_static_info->wrapper_regs.cgc_disable, 0x0);
988
Harsh Shah97fd0712017-10-09 15:50:46 -0700989 spin_lock_irqsave(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700990 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
Harsh Shah97fd0712017-10-09 15:50:46 -0700991 spin_unlock_irqrestore(&fd_core->spin_lock, flags);
Pavan Kumar Chilamkurthi5719f212017-07-20 15:02:21 -0700992
993 return rc;
994}
995
996int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
997{
998 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
999 int rc = -EINVAL;
1000 struct cam_fd_ctx_hw_private *ctx_hw_private;
1001 struct cam_fd_hw_reserve_args *reserve_args =
1002 (struct cam_fd_hw_reserve_args *)hw_reserve_args;
1003 struct cam_cdm_acquire_data cdm_acquire;
1004 struct cam_cdm_bl_request *cdm_cmd;
1005 int i;
1006
1007 if (!fd_hw || !reserve_args) {
1008 CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args);
1009 return -EINVAL;
1010 }
1011
1012 if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
1013 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
1014 sizeof(struct cam_fd_hw_reserve_args));
1015 return -EINVAL;
1016 }
1017
1018 cdm_cmd = kzalloc(((sizeof(struct cam_cdm_bl_request)) +
1019 ((CAM_FD_MAX_HW_ENTRIES - 1) *
1020 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
1021 if (!cdm_cmd)
1022 return -ENOMEM;
1023
1024 ctx_hw_private = kzalloc(sizeof(struct cam_fd_ctx_hw_private),
1025 GFP_KERNEL);
1026 if (!ctx_hw_private) {
1027 kfree(cdm_cmd);
1028 return -ENOMEM;
1029 }
1030
1031 memset(&cdm_acquire, 0, sizeof(cdm_acquire));
1032 strlcpy(cdm_acquire.identifier, "fd", sizeof("fd"));
1033 cdm_acquire.cell_index = fd_hw->soc_info.index;
1034 cdm_acquire.handle = 0;
1035 cdm_acquire.userdata = ctx_hw_private;
1036 cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback;
1037 cdm_acquire.id = CAM_CDM_VIRTUAL;
1038 cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map;
1039 for (i = 0; i < fd_hw->soc_info.num_reg_map; i++)
1040 cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i];
1041
1042 rc = cam_cdm_acquire(&cdm_acquire);
1043 if (rc) {
1044 CAM_ERR(CAM_FD, "Failed to acquire the CDM HW");
1045 goto error;
1046 }
1047
1048 ctx_hw_private->hw_ctx = reserve_args->hw_ctx;
1049 ctx_hw_private->fd_hw = fd_hw;
1050 ctx_hw_private->mode = reserve_args->mode;
1051 ctx_hw_private->cdm_handle = cdm_acquire.handle;
1052 ctx_hw_private->cdm_ops = cdm_acquire.ops;
1053 ctx_hw_private->cdm_cmd = cdm_cmd;
1054
1055 reserve_args->ctx_hw_private = ctx_hw_private;
1056
1057 CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x",
1058 ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode,
1059 ctx_hw_private->cdm_handle);
1060
1061 return 0;
1062error:
1063 kfree(ctx_hw_private);
1064 kfree(cdm_cmd);
1065 return rc;
1066}
1067
1068int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
1069{
1070 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
1071 int rc = -EINVAL;
1072 struct cam_fd_ctx_hw_private *ctx_hw_private;
1073 struct cam_fd_hw_release_args *release_args =
1074 (struct cam_fd_hw_release_args *)hw_release_args;
1075
1076 if (!fd_hw || !release_args) {
1077 CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
1078 return -EINVAL;
1079 }
1080
1081 if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
1082 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
1083 sizeof(struct cam_fd_hw_release_args));
1084 return -EINVAL;
1085 }
1086
1087 ctx_hw_private =
1088 (struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
1089
1090 rc = cam_cdm_release(ctx_hw_private->cdm_handle);
1091 if (rc)
1092 CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
1093 ctx_hw_private->cdm_handle, rc);
1094
1095 kfree(ctx_hw_private);
1096 release_args->ctx_hw_private = NULL;
1097
1098 return 0;
1099}
1100
1101int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
1102 void *cmd_args, uint32_t arg_size)
1103{
1104 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
1105 int rc = -EINVAL;
1106
1107 if (!hw_priv || !cmd_args ||
1108 (cmd_type >= CAM_FD_HW_CMD_MAX)) {
1109 CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
1110 cmd_args, cmd_type);
1111 return -EINVAL;
1112 }
1113
1114 switch (cmd_type) {
1115 case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
1116 struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
1117 struct cam_fd_core *fd_core =
1118 (struct cam_fd_core *)fd_hw->core_info;
1119
1120 if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
1121 CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
1122 cmd_type, arg_size);
1123 break;
1124 }
1125
1126 irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
1127 fd_core->irq_cb.cam_fd_hw_mgr_cb =
1128 irq_cb_args->cam_fd_hw_mgr_cb;
1129 fd_core->irq_cb.data = irq_cb_args->data;
1130 rc = 0;
1131 break;
1132 }
1133 case CAM_FD_HW_CMD_PRESTART: {
1134 struct cam_fd_hw_cmd_prestart_args *prestart_args;
1135
1136 if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
1137 CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
1138 cmd_type, arg_size);
1139 break;
1140 }
1141
1142 prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
1143 rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
1144 break;
1145 }
1146 case CAM_FD_HW_CMD_FRAME_DONE: {
1147 struct cam_fd_hw_frame_done_args *cmd_frame_results;
1148
1149 if (sizeof(struct cam_fd_hw_frame_done_args) !=
1150 arg_size) {
1151 CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
1152 cmd_type, arg_size);
1153 break;
1154 }
1155
1156 cmd_frame_results =
1157 (struct cam_fd_hw_frame_done_args *)cmd_args;
1158 rc = cam_fd_hw_util_processcmd_frame_done(fd_hw,
1159 cmd_frame_results);
1160 break;
1161 }
1162 default:
1163 break;
1164 }
1165
1166 return rc;
1167}