blob: 51fcdcaadb6f39301e87746e4bbb776ee5f377e8 [file] [log] [blame]
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "cam_fd_hw_core.h"
14#include "cam_fd_hw_soc.h"
15
16#define CAM_FD_REG_VAL_PAIR_SIZE 256
17
18static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer,
19 uint32_t index, uint32_t reg_offset, uint32_t reg_value)
20{
21 buffer[index++] = reg_offset;
22 buffer[index++] = reg_value;
23
24 CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%8x] Value[0x%8x]",
25 reg_offset, reg_value);
26
27 return index;
28}
29
/*
 * cam_fd_hw_util_cdm_callback() - Callback registered with the CDM driver
 * for FD CDM submissions; only logs the notification, no state is updated.
 */
static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
	enum cam_cdm_cb_status status, uint32_t cookie)
{
	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
		handle, userdata, status, cookie);
}
36
37static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw)
38{
39 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
40 struct cam_fd_hw_static_info *hw_static_info =
41 ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
42
43 if (hw_static_info->enable_errata_wa.single_irq_only == false) {
44 /* Enable IRQs here */
45 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
46 hw_static_info->wrapper_regs.irq_mask,
47 hw_static_info->irq_mask);
48 }
49
50 /* QoS settings */
51 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
52 hw_static_info->wrapper_regs.vbif_req_priority,
53 hw_static_info->qos_priority);
54 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
55 hw_static_info->wrapper_regs.vbif_priority_level,
56 hw_static_info->qos_priority_level);
57}
58
59int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
60 struct cam_fd_hw_caps *hw_caps)
61{
62 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
63 struct cam_fd_hw_static_info *hw_static_info =
64 ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
65 uint32_t reg_value;
66
67 if (!hw_static_info) {
68 CAM_ERR(CAM_FD, "Invalid hw info data");
69 return -EINVAL;
70 }
71
72 reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE,
73 hw_static_info->core_regs.version);
74 hw_caps->core_version.major =
75 CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8);
76 hw_caps->core_version.minor =
77 CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4);
78 hw_caps->core_version.incr =
79 CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0);
80
81 reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
82 hw_static_info->wrapper_regs.wrapper_version);
83 hw_caps->wrapper_version.major =
84 CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
85 hw_caps->wrapper_version.minor =
86 CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
87 hw_caps->wrapper_version.incr =
88 CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
89
90 hw_caps->raw_results_available =
91 hw_static_info->results.raw_results_available;
92 hw_caps->supported_modes = hw_static_info->supported_modes;
93
94 CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d intermediate:%d",
95 hw_caps->core_version.major, hw_caps->core_version.minor,
96 hw_caps->core_version.incr, hw_caps->wrapper_version.major,
97 hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr,
98 hw_caps->raw_results_available);
99
100 return 0;
101}
102
103static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw)
104{
105 struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
106 struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
107 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
108 long time_left;
109
110 /* Before triggering reset to HW, clear the reset complete */
111 reinit_completion(&fd_core->reset_complete);
112
113 cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
114 hw_static_info->core_regs.control, 0x1);
115
116 if (hw_static_info->enable_errata_wa.single_irq_only) {
117 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
118 hw_static_info->wrapper_regs.irq_mask,
119 CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE));
120 }
121
122 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
123 hw_static_info->wrapper_regs.sw_reset, 0x1);
124
125 time_left = wait_for_completion_timeout(&fd_core->reset_complete,
126 msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
127 if (time_left <= 0) {
128 CAM_ERR(CAM_FD, "HW reset wait failed time_left=%d", time_left);
129 return -EPERM;
130 }
131
132 cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
133 hw_static_info->core_regs.control, 0x0);
134
135 CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
136
137 return 0;
138}
139
140
141static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw)
142{
143 struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
144 struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
145 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
146 long time_left;
147
148 /* Before triggering halt to HW, clear halt complete */
149 reinit_completion(&fd_core->halt_complete);
150
151 cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
152 hw_static_info->core_regs.control, 0x1);
153
154 if (hw_static_info->enable_errata_wa.single_irq_only) {
155 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
156 hw_static_info->wrapper_regs.irq_mask,
157 CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE));
158 }
159
160 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
161 hw_static_info->wrapper_regs.hw_stop, 0x1);
162
163 time_left = wait_for_completion_timeout(&fd_core->halt_complete,
164 msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
165 if (time_left <= 0) {
166 CAM_ERR(CAM_FD, "HW halt wait failed time_left=%d", time_left);
167 return -EPERM;
168 }
169
170 cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
171 hw_static_info->core_regs.control, 0x0);
172
173 CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
174
175 return 0;
176}
177
178static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw,
179 struct cam_fd_hw_cmd_prestart_args *prestart_args)
180{
181 struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
182 struct cam_fd_hw_static_info *hw_static_info =
183 ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
184 struct cam_fd_ctx_hw_private *ctx_hw_private =
185 prestart_args->ctx_hw_private;
186 uint32_t size, size_required = 0;
187 uint32_t mem_base;
188 uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr;
189 uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE];
190 uint32_t num_cmds = 0;
191 int i;
192 struct cam_fd_hw_io_buffer *io_buf;
193 struct cam_fd_hw_req_private *req_private;
194 uint32_t available_size = prestart_args->size;
195 bool work_buffer_configured = false;
196
197 if (!ctx_hw_private || !cmd_buf_addr) {
198 CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK",
199 ctx_hw_private, cmd_buf_addr);
200 return -EINVAL;
201 }
202
203 if (prestart_args->get_raw_results &&
204 !hw_static_info->results.raw_results_available) {
205 CAM_ERR(CAM_FD, "Raw results not supported %d %d",
206 prestart_args->get_raw_results,
207 hw_static_info->results.raw_results_available);
208 return -EINVAL;
209 }
210
211 req_private = &prestart_args->hw_req_private;
212 req_private->ctx_hw_private = prestart_args->ctx_hw_private;
213 req_private->request_id = prestart_args->request_id;
214 req_private->get_raw_results = prestart_args->get_raw_results;
215 req_private->fd_results = NULL;
216 req_private->raw_results = NULL;
217
218 /* Start preparing CDM register values that KMD has to insert */
219 num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
220 hw_static_info->core_regs.control, 0x1);
221 num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
222 hw_static_info->core_regs.control, 0x0);
223
224 for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
225 io_buf = &prestart_args->input_buf[i];
226
227 if (io_buf->valid == false)
228 break;
229
230 if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
231 CAM_ERR(CAM_FD, "Incorrect direction %d %d",
232 io_buf->io_cfg->direction, CAM_BUF_INPUT);
233 return -EINVAL;
234 }
235
236 switch (io_buf->io_cfg->resource_type) {
237 case CAM_FD_INPUT_PORT_ID_IMAGE: {
238 if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
239 CAM_ERR(CAM_FD,
240 "Invalid reg_val pair size %d, %d",
241 num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
242 return -EINVAL;
243 }
244
245 num_cmds = cam_fd_cdm_write_reg_val_pair(
246 reg_val_pair, num_cmds,
247 hw_static_info->core_regs.image_addr,
248 io_buf->io_addr[0]);
249 break;
250 }
251 default:
252 CAM_ERR(CAM_FD, "Invalid resource type %d",
253 io_buf->io_cfg->resource_type);
254 return -EINVAL;
255 }
256 }
257
258 for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
259 io_buf = &prestart_args->output_buf[i];
260
261 if (io_buf->valid == false)
262 break;
263
264 if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
265 CAM_ERR(CAM_FD, "Incorrect direction %d %d",
266 io_buf->io_cfg->direction, CAM_BUF_INPUT);
267 return -EINVAL;
268 }
269
270 switch (io_buf->io_cfg->resource_type) {
271 case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
272 uint32_t face_results_offset;
273
274 size_required = hw_static_info->results.max_faces *
275 hw_static_info->results.per_face_entries * 4;
276
277 if (io_buf->io_cfg->planes[0].plane_stride <
278 size_required) {
279 CAM_ERR(CAM_FD, "Invalid results size %d %d",
280 io_buf->io_cfg->planes[0].plane_stride,
281 size_required);
282 return -EINVAL;
283 }
284
285 req_private->fd_results =
286 (struct cam_fd_results *)io_buf->cpu_addr[0];
287
288 face_results_offset =
289 (uint8_t *)&req_private->fd_results->faces[0] -
290 (uint8_t *)req_private->fd_results;
291
292 if (hw_static_info->ro_mode_supported) {
293 if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
294 CAM_ERR(CAM_FD,
295 "Invalid reg_val size %d, %d",
296 num_cmds,
297 CAM_FD_REG_VAL_PAIR_SIZE);
298 return -EINVAL;
299 }
300 /*
301 * Face data actually starts 16bytes later in
302 * the io buffer Check cam_fd_results.
303 */
304 num_cmds = cam_fd_cdm_write_reg_val_pair(
305 reg_val_pair, num_cmds,
306 hw_static_info->core_regs.result_addr,
307 io_buf->io_addr[0] +
308 face_results_offset);
309 num_cmds = cam_fd_cdm_write_reg_val_pair(
310 reg_val_pair, num_cmds,
311 hw_static_info->core_regs.ro_mode,
312 0x1);
313
314 req_private->ro_mode_enabled = true;
315 } else {
316 req_private->ro_mode_enabled = false;
317 }
318 break;
319 }
320 case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: {
321 size_required =
322 hw_static_info->results.raw_results_entries *
323 sizeof(uint32_t);
324
325 if (io_buf->io_cfg->planes[0].plane_stride <
326 size_required) {
327 CAM_ERR(CAM_FD, "Invalid results size %d %d",
328 io_buf->io_cfg->planes[0].plane_stride,
329 size_required);
330 return -EINVAL;
331 }
332
333 req_private->raw_results =
334 (uint32_t *)io_buf->cpu_addr[0];
335 break;
336 }
337 case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: {
338 if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
339 CAM_ERR(CAM_FD,
340 "Invalid reg_val pair size %d, %d",
341 num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
342 return -EINVAL;
343 }
344
345 num_cmds = cam_fd_cdm_write_reg_val_pair(
346 reg_val_pair, num_cmds,
347 hw_static_info->core_regs.work_addr,
348 io_buf->io_addr[0]);
349
350 work_buffer_configured = true;
351 break;
352 }
353 default:
354 CAM_ERR(CAM_FD, "Invalid resource type %d",
355 io_buf->io_cfg->resource_type);
356 return -EINVAL;
357 }
358 }
359
360 if (!req_private->fd_results || !work_buffer_configured) {
361 CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d",
362 req_private->fd_results, work_buffer_configured);
363 return -EINVAL;
364 }
365
366 /* First insert CHANGE_BASE command */
367 size = ctx_hw_private->cdm_ops->cdm_required_size_changebase();
368 /* since cdm returns dwords, we need to convert it into bytes */
369 if ((size * 4) > available_size) {
370 CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d",
371 prestart_args->size, size);
372 return -EINVAL;
373 }
374
375 mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info,
376 ((struct cam_fd_soc_private *)soc_info->soc_private)->
377 regbase_index[CAM_FD_REG_CORE]);
378
379 ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
380 cmd_buf_addr += size;
381 available_size -= (size * 4);
382
383 size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
384 num_cmds/2);
385 /* cdm util returns dwords, need to convert to bytes */
386 if ((size * 4) > available_size) {
387 CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
388 available_size, size);
389 return -ENOMEM;
390 }
391 ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
392 reg_val_pair);
393 cmd_buf_addr += size;
394 available_size -= (size * 4);
395
396 /* Update pre_config_buf_size in bytes */
397 prestart_args->pre_config_buf_size =
398 prestart_args->size - available_size;
399
400 /*
401 * Currently, no post config commands, we trigger HW start directly
402 * from start(). Start trigger command can be inserted into CDM
403 * as post config commands.
404 */
405 prestart_args->post_config_buf_size = 0;
406
407 CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
408 prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
409 cmd_buf_addr, prestart_args->post_config_buf_size);
410
411 for (i = 0; i < (prestart_args->pre_config_buf_size +
412 prestart_args->post_config_buf_size) / 4; i++)
413 CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i,
414 &prestart_args->cmd_buf_addr[i],
415 prestart_args->cmd_buf_addr[i]);
416
417 return 0;
418}
419
/*
 * cam_fd_hw_util_processcmd_frame_done() - Copy FD results for the request
 * that just completed processing into the request's output buffers.
 *
 * Must only be called when the core is IDLE with valid results (i.e. after
 * a FRAME_DONE IRQ); transitions the core through READING_RESULTS and back
 * to IDLE, clearing hw_req_private.
 *
 * Return: 0 on success, -EINVAL if the core is in the wrong state.
 */
static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
	struct cam_fd_hw_frame_done_args *frame_done_args)
{
	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
	struct cam_fd_hw_req_private *req_private;
	uint32_t base, face_cnt;
	uint32_t *buffer;
	int i;

	/* Snapshot/validate core state and claim the results under the lock */
	spin_lock(&fd_core->spin_lock);
	if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) ||
		(fd_core->results_valid == false) ||
		!fd_core->hw_req_private) {
		CAM_ERR(CAM_FD,
			"Invalid state for results state=%d, results=%d %pK",
			fd_core->core_state, fd_core->results_valid,
			fd_core->hw_req_private);
		spin_unlock(&fd_core->spin_lock);
		return -EINVAL;
	}
	fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
	req_private = fd_core->hw_req_private;
	spin_unlock(&fd_core->spin_lock);

	/*
	 * Copy the register value as is into output buffers.
	 * Whether we are copying the output data by reading registers or
	 * programming output buffer directly to HW must be transparent to UMD.
	 * In case HW supports writing face count value directly into
	 * DDR memory in future, these values should match.
	 */
	req_private->fd_results->face_count =
		cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
			hw_static_info->core_regs.result_cnt);

	/* Low 6 bits presumably hold the face count — confirm against HW spec */
	face_cnt = req_private->fd_results->face_count & 0x3F;

	if (face_cnt > hw_static_info->results.max_faces) {
		CAM_WARN(CAM_FD, "Face count greater than max %d %d",
			face_cnt, hw_static_info->results.max_faces);
		face_cnt = hw_static_info->results.max_faces;
	}

	CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
		req_private->request_id, face_cnt);

	/*
	 * We need to read the face data information from registers only
	 * if one of below is true
	 * 1. RO mode is not set. i.e FD HW doesn't write face data into
	 *    DDR memory
	 * 2. On the current chipset, results written into DDR memory by FD HW
	 *    are not guaranteed to be correct
	 */
	if (!req_private->ro_mode_enabled ||
		hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
		buffer = (uint32_t *)&req_private->fd_results->faces[0];
		base = hw_static_info->core_regs.results_reg_base;

		/*
		 * Write register values as is into face data buffer. Its UMD
		 * driver responsibility to interpret the data and extract face
		 * properties from output buffer. Think in case output buffer
		 * is directly programmed to HW, then KMD has no control to
		 * extract the face properties and UMD anyway has to extract
		 * face properties. So we follow the same approach and keep
		 * this transparent to UMD.
		 */
		for (i = 0;
			i < (face_cnt *
			hw_static_info->results.per_face_entries); i++) {
			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
				CAM_FD_REG_CORE, base + (i * 0x4));
			CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer);
			buffer++;
		}
	}

	/* Optionally copy the raw result registers too, when requested */
	if (req_private->get_raw_results &&
		req_private->raw_results &&
		hw_static_info->results.raw_results_available) {
		buffer = req_private->raw_results;
		base = hw_static_info->core_regs.raw_results_reg_base;

		for (i = 0;
			i < hw_static_info->results.raw_results_entries;
			i++) {
			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
				CAM_FD_REG_CORE, base + (i * 0x4));
			CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer);
			buffer++;
		}
	}

	/* Done reading results; release the request and go back to IDLE */
	spin_lock(&fd_core->spin_lock);
	fd_core->hw_req_private = NULL;
	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
	spin_unlock(&fd_core->spin_lock);

	return 0;
}
522
523irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
524{
525 struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
526 struct cam_fd_core *fd_core;
527 struct cam_hw_soc_info *soc_info;
528 struct cam_fd_hw_static_info *hw_static_info;
529 uint32_t reg_value;
530 enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE;
531 uint32_t num_irqs = 0;
532
533 if (!fd_hw) {
534 CAM_ERR(CAM_FD, "Invalid data in IRQ callback");
535 return -EINVAL;
536 }
537
538 fd_core = (struct cam_fd_core *) fd_hw->core_info;
539 soc_info = &fd_hw->soc_info;
540 hw_static_info = fd_core->hw_static_info;
541
542 reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
543 hw_static_info->wrapper_regs.irq_status);
544
545 CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value);
546
547 if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) {
548 complete_all(&fd_core->halt_complete);
549 irq_type = CAM_FD_IRQ_HALT_DONE;
550 num_irqs++;
551 }
552
553 if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) {
554 complete_all(&fd_core->reset_complete);
555 irq_type = CAM_FD_IRQ_RESET_DONE;
556 num_irqs++;
557 }
558
559 if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) {
560 complete_all(&fd_core->processing_complete);
561 irq_type = CAM_FD_IRQ_FRAME_DONE;
562 num_irqs++;
563 }
564
565 /*
566 * We should never get an IRQ callback with no or more than one mask.
567 * Validate first to make sure nothing going wrong.
568 */
569 if (num_irqs != 1) {
570 CAM_ERR(CAM_FD,
571 "Invalid number of IRQs, value=0x%x, num_irqs=%d",
572 reg_value, num_irqs);
573 return -EINVAL;
574 }
575
576 cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
577 hw_static_info->wrapper_regs.irq_clear,
578 hw_static_info->irq_mask);
579
580 if (irq_type == CAM_FD_IRQ_HALT_DONE) {
581 /*
582 * Do not send HALT IRQ callback to Hw Mgr,
583 * a reset would always follow
584 */
585 return IRQ_HANDLED;
586 }
587
588 spin_lock(&fd_core->spin_lock);
589 /* Do not change state to IDLE on HALT IRQ. Reset must follow halt */
590 if ((irq_type == CAM_FD_IRQ_RESET_DONE) ||
591 (irq_type == CAM_FD_IRQ_FRAME_DONE)) {
592
593 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
594 if (irq_type == CAM_FD_IRQ_FRAME_DONE)
595 fd_core->results_valid = true;
596
597 CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d",
598 irq_type, fd_core->core_state);
599 }
600 spin_unlock(&fd_core->spin_lock);
601
602 if (fd_core->irq_cb.cam_fd_hw_mgr_cb)
603 fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data,
604 irq_type);
605
606 return IRQ_HANDLED;
607}
608
609int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
610 uint32_t arg_size)
611{
612 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
613 struct cam_fd_core *fd_core;
614 struct cam_fd_hw_caps *fd_hw_caps =
615 (struct cam_fd_hw_caps *)get_hw_cap_args;
616
617 if (!hw_priv || !get_hw_cap_args) {
618 CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK",
619 hw_priv, get_hw_cap_args);
620 return -EINVAL;
621 }
622
623 fd_core = (struct cam_fd_core *)fd_hw->core_info;
624 *fd_hw_caps = fd_core->hw_caps;
625
626 CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d",
627 fd_hw_caps->core_version.major,
628 fd_hw_caps->core_version.minor,
629 fd_hw_caps->wrapper_version.major,
630 fd_hw_caps->wrapper_version.minor,
631 fd_hw_caps->supported_modes,
632 fd_hw_caps->raw_results_available);
633
634 return 0;
635}
636
637int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
638{
639 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
640 struct cam_fd_core *fd_core;
641 struct cam_fd_hw_init_args *init_args =
642 (struct cam_fd_hw_init_args *)init_hw_args;
643 int rc = 0;
644
645 if (!fd_hw || !init_args) {
646 CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
647 return -EINVAL;
648 }
649
650 if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
651 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
652 sizeof(struct cam_fd_hw_init_args));
653 return -EINVAL;
654 }
655
656 fd_core = (struct cam_fd_core *)fd_hw->core_info;
657
658 mutex_lock(&fd_hw->hw_mutex);
659 CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count);
660
661 if (fd_hw->open_count > 0) {
662 rc = 0;
663 mutex_unlock(&fd_hw->hw_mutex);
664 goto cdm_streamon;
665 }
666
667 rc = cam_fd_soc_enable_resources(&fd_hw->soc_info);
668 if (rc) {
669 CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc);
670 goto unlock_return;
671 }
672
673 rc = cam_fd_hw_reset(hw_priv, NULL, 0);
674 if (rc) {
675 CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
676 goto disable_soc;
677 }
678
679 cam_fd_hw_util_enable_power_on_settings(fd_hw);
680
681 fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
682 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
683 fd_hw->open_count++;
684 CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
685
686 mutex_unlock(&fd_hw->hw_mutex);
687
688cdm_streamon:
689 if (init_args->ctx_hw_private) {
690 struct cam_fd_ctx_hw_private *ctx_hw_private =
691 init_args->ctx_hw_private;
692
693 rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle);
694 if (rc) {
695 CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
696 ctx_hw_private->cdm_handle, rc);
697 return rc;
698 }
699 }
700
701 return rc;
702
703disable_soc:
704 if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
705 CAM_ERR(CAM_FD, "Error in disable soc resources");
706unlock_return:
707 mutex_unlock(&fd_hw->hw_mutex);
708 return rc;
709}
710
711int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
712{
713 struct cam_hw_info *fd_hw = hw_priv;
714 struct cam_fd_core *fd_core;
715 struct cam_fd_hw_deinit_args *deinit_args =
716 (struct cam_fd_hw_deinit_args *)deinit_hw_args;
717 int rc = 0;
718
719 if (!fd_hw || !deinit_hw_args) {
720 CAM_ERR(CAM_FD, "Invalid argument");
721 return -EINVAL;
722 }
723
724 if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
725 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
726 sizeof(struct cam_fd_hw_deinit_args));
727 return -EINVAL;
728 }
729
730 fd_core = (struct cam_fd_core *)fd_hw->core_info;
731
732 if (deinit_args->ctx_hw_private) {
733 struct cam_fd_ctx_hw_private *ctx_hw_private =
734 deinit_args->ctx_hw_private;
735
736 rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
737 if (rc) {
738 CAM_ERR(CAM_FD,
739 "Failed in CDM StreamOff, handle=0x%x, rc=%d",
740 ctx_hw_private->cdm_handle, rc);
741 return rc;
742 }
743 }
744
745 mutex_lock(&fd_hw->hw_mutex);
746
747 if (fd_hw->open_count == 0) {
748 mutex_unlock(&fd_hw->hw_mutex);
749 CAM_ERR(CAM_FD, "Error Unbalanced deinit");
750 return -EFAULT;
751 }
752
753 fd_hw->open_count--;
754 CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
755
756 if (fd_hw->open_count) {
757 rc = 0;
758 goto unlock_return;
759 }
760
761 rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
762 if (rc)
763 CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
764
765 fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
766 fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
767
768unlock_return:
769 mutex_unlock(&fd_hw->hw_mutex);
770 return rc;
771}
772
773int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
774{
775 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
776 struct cam_fd_core *fd_core;
777 int rc;
778
779 if (!fd_hw) {
780 CAM_ERR(CAM_FD, "Invalid input handle");
781 return -EINVAL;
782 }
783
784 fd_core = (struct cam_fd_core *)fd_hw->core_info;
785
786 spin_lock(&fd_core->spin_lock);
787 if (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS) {
788 CAM_ERR(CAM_FD, "Reset not allowed in %d state",
789 fd_core->core_state);
790 spin_unlock(&fd_core->spin_lock);
791 return -EINVAL;
792 }
793
794 fd_core->results_valid = false;
795 fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
796 spin_unlock(&fd_core->spin_lock);
797
798 rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
799 if (rc) {
800 CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
801 return rc;
802 }
803
804 spin_lock(&fd_core->spin_lock);
805 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
806 spin_unlock(&fd_core->spin_lock);
807
808 return rc;
809}
810
811int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
812{
813 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
814 struct cam_fd_core *fd_core;
815 struct cam_fd_hw_static_info *hw_static_info;
816 struct cam_fd_hw_cmd_start_args *start_args =
817 (struct cam_fd_hw_cmd_start_args *)hw_start_args;
818 struct cam_fd_ctx_hw_private *ctx_hw_private;
819 int rc;
820
821 if (!hw_priv || !start_args) {
822 CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv,
823 start_args);
824 return -EINVAL;
825 }
826
827 if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
828 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
829 sizeof(struct cam_fd_hw_cmd_start_args));
830 return -EINVAL;
831 }
832
833 fd_core = (struct cam_fd_core *)fd_hw->core_info;
834 hw_static_info = fd_core->hw_static_info;
835
836 spin_lock(&fd_core->spin_lock);
837 if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) {
838 CAM_ERR(CAM_FD, "Cannot start in %d state",
839 fd_core->core_state);
840 spin_unlock(&fd_core->spin_lock);
841 return -EINVAL;
842 }
843
844 /*
845 * We are about to start FD HW processing, save the request
846 * private data which is being processed by HW. Once the frame
847 * processing is finished, process_cmd(FRAME_DONE) should be called
848 * with same hw_req_private as input.
849 */
850 fd_core->hw_req_private = start_args->hw_req_private;
851 fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING;
852 fd_core->results_valid = false;
853 spin_unlock(&fd_core->spin_lock);
854
855 ctx_hw_private = start_args->ctx_hw_private;
856
857 /* Before starting HW process, clear processing complete */
858 reinit_completion(&fd_core->processing_complete);
859
860 if (hw_static_info->enable_errata_wa.single_irq_only) {
861 cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER,
862 hw_static_info->wrapper_regs.irq_mask,
863 CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE));
864 }
865
866 if (start_args->num_hw_update_entries > 0) {
867 struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd;
868 struct cam_hw_update_entry *cmd;
869 int i;
870
871 cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries;
872 cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
873 cdm_cmd->flag = false;
874 cdm_cmd->userdata = NULL;
875 cdm_cmd->cookie = 0;
876
877 for (i = 0 ; i <= start_args->num_hw_update_entries; i++) {
878 cmd = (start_args->hw_update_entries + i);
879 cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
880 cdm_cmd->cmd[i].offset = cmd->offset;
881 cdm_cmd->cmd[i].len = cmd->len;
882 }
883
884 rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd);
885 if (rc) {
886 CAM_ERR(CAM_FD,
887 "Failed to submit cdm commands, rc=%d", rc);
888 goto error;
889 }
890 } else {
891 CAM_ERR(CAM_FD, "Invalid number of hw update entries");
892 rc = -EINVAL;
893 goto error;
894 }
895
896 cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_CORE,
897 hw_static_info->core_regs.control, 0x2);
898
899 return 0;
900error:
901 spin_lock(&fd_core->spin_lock);
902 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
903 spin_unlock(&fd_core->spin_lock);
904
905 return rc;
906}
907
908int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size)
909{
910 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
911 struct cam_fd_core *fd_core;
912 int rc;
913
914 if (!fd_hw) {
915 CAM_ERR(CAM_FD, "Invalid input handle");
916 return -EINVAL;
917 }
918
919 fd_core = (struct cam_fd_core *)fd_hw->core_info;
920
921 spin_lock(&fd_core->spin_lock);
922 if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
923 (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
924 CAM_ERR(CAM_FD, "Reset not allowed in %d state",
925 fd_core->core_state);
926 spin_unlock(&fd_core->spin_lock);
927 return -EINVAL;
928 }
929
930 fd_core->results_valid = false;
931 fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
932 spin_unlock(&fd_core->spin_lock);
933
934 rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
935 if (rc) {
936 CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
937 return rc;
938 }
939
940 /* HALT must be followed by RESET */
941 rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
942 if (rc) {
943 CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
944 return rc;
945 }
946
947 spin_lock(&fd_core->spin_lock);
948 fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
949 spin_unlock(&fd_core->spin_lock);
950
951 return rc;
952}
953
954int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
955{
956 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
957 int rc = -EINVAL;
958 struct cam_fd_ctx_hw_private *ctx_hw_private;
959 struct cam_fd_hw_reserve_args *reserve_args =
960 (struct cam_fd_hw_reserve_args *)hw_reserve_args;
961 struct cam_cdm_acquire_data cdm_acquire;
962 struct cam_cdm_bl_request *cdm_cmd;
963 int i;
964
965 if (!fd_hw || !reserve_args) {
966 CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args);
967 return -EINVAL;
968 }
969
970 if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
971 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
972 sizeof(struct cam_fd_hw_reserve_args));
973 return -EINVAL;
974 }
975
976 cdm_cmd = kzalloc(((sizeof(struct cam_cdm_bl_request)) +
977 ((CAM_FD_MAX_HW_ENTRIES - 1) *
978 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
979 if (!cdm_cmd)
980 return -ENOMEM;
981
982 ctx_hw_private = kzalloc(sizeof(struct cam_fd_ctx_hw_private),
983 GFP_KERNEL);
984 if (!ctx_hw_private) {
985 kfree(cdm_cmd);
986 return -ENOMEM;
987 }
988
989 memset(&cdm_acquire, 0, sizeof(cdm_acquire));
990 strlcpy(cdm_acquire.identifier, "fd", sizeof("fd"));
991 cdm_acquire.cell_index = fd_hw->soc_info.index;
992 cdm_acquire.handle = 0;
993 cdm_acquire.userdata = ctx_hw_private;
994 cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback;
995 cdm_acquire.id = CAM_CDM_VIRTUAL;
996 cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map;
997 for (i = 0; i < fd_hw->soc_info.num_reg_map; i++)
998 cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i];
999
1000 rc = cam_cdm_acquire(&cdm_acquire);
1001 if (rc) {
1002 CAM_ERR(CAM_FD, "Failed to acquire the CDM HW");
1003 goto error;
1004 }
1005
1006 ctx_hw_private->hw_ctx = reserve_args->hw_ctx;
1007 ctx_hw_private->fd_hw = fd_hw;
1008 ctx_hw_private->mode = reserve_args->mode;
1009 ctx_hw_private->cdm_handle = cdm_acquire.handle;
1010 ctx_hw_private->cdm_ops = cdm_acquire.ops;
1011 ctx_hw_private->cdm_cmd = cdm_cmd;
1012
1013 reserve_args->ctx_hw_private = ctx_hw_private;
1014
1015 CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x",
1016 ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode,
1017 ctx_hw_private->cdm_handle);
1018
1019 return 0;
1020error:
1021 kfree(ctx_hw_private);
1022 kfree(cdm_cmd);
1023 return rc;
1024}
1025
1026int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
1027{
1028 struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
1029 int rc = -EINVAL;
1030 struct cam_fd_ctx_hw_private *ctx_hw_private;
1031 struct cam_fd_hw_release_args *release_args =
1032 (struct cam_fd_hw_release_args *)hw_release_args;
1033
1034 if (!fd_hw || !release_args) {
1035 CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
1036 return -EINVAL;
1037 }
1038
1039 if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
1040 CAM_ERR(CAM_FD, "Invalid arg size %d, %d", arg_size,
1041 sizeof(struct cam_fd_hw_release_args));
1042 return -EINVAL;
1043 }
1044
1045 ctx_hw_private =
1046 (struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
1047
1048 rc = cam_cdm_release(ctx_hw_private->cdm_handle);
1049 if (rc)
1050 CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
1051 ctx_hw_private->cdm_handle, rc);
1052
1053 kfree(ctx_hw_private);
1054 release_args->ctx_hw_private = NULL;
1055
1056 return 0;
1057}
1058
/*
 * cam_fd_hw_process_cmd() - HW-intf command dispatcher for the FD device.
 * @hw_priv:  FD cam_hw_info instance.
 * @cmd_type: One of enum cam_fd_hw_cmd_type (< CAM_FD_HW_CMD_MAX).
 * @cmd_args: Command-specific argument struct; size checked per command.
 * @arg_size: Size of @cmd_args in bytes.
 *
 * Supported commands: REGISTER_CALLBACK (save the HW mgr IRQ callback),
 * PRESTART (build CDM commands for a request) and FRAME_DONE (copy
 * results after processing).
 *
 * Return: 0 on success, -EINVAL for bad args/sizes or unknown commands,
 * or the underlying handler's error code.
 */
int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
	int rc = -EINVAL;

	if (!hw_priv || !cmd_args ||
		(cmd_type >= CAM_FD_HW_CMD_MAX)) {
		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
			cmd_args, cmd_type);
		return -EINVAL;
	}

	switch (cmd_type) {
	case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
		/* Save the HW manager callback invoked from the IRQ path */
		struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
		struct cam_fd_core *fd_core =
			(struct cam_fd_core *)fd_hw->core_info;

		if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
				cmd_type, arg_size);
			break;
		}

		irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
		fd_core->irq_cb.cam_fd_hw_mgr_cb =
			irq_cb_args->cam_fd_hw_mgr_cb;
		fd_core->irq_cb.data = irq_cb_args->data;
		rc = 0;
		break;
	}
	case CAM_FD_HW_CMD_PRESTART: {
		/* Build KMD CDM commands before the request is started */
		struct cam_fd_hw_cmd_prestart_args *prestart_args;

		if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
				cmd_type, arg_size);
			break;
		}

		prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
		rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
		break;
	}
	case CAM_FD_HW_CMD_FRAME_DONE: {
		/* Copy HW results into the request's output buffers */
		struct cam_fd_hw_frame_done_args *cmd_frame_results;

		if (sizeof(struct cam_fd_hw_frame_done_args) !=
			arg_size) {
			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
				cmd_type, arg_size);
			break;
		}

		cmd_frame_results =
			(struct cam_fd_hw_frame_done_args *)cmd_args;
		rc = cam_fd_hw_util_processcmd_frame_done(fd_hw,
			cmd_frame_results);
		break;
	}
	default:
		/* Unknown command: fall through with rc = -EINVAL */
		break;
	}

	return rc;
}