/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <media/cam_cpas.h>
#include <media/cam_req_mgr.h>

#include "cam_io_util.h"
#include "cam_soc_util.h"
#include "cam_mem_mgr_api.h"
#include "cam_smmu_api.h"
#include "cam_packet_util.h"
#include "cam_fd_context.h"
#include "cam_fd_hw_intf.h"
#include "cam_fd_hw_core.h"
#include "cam_fd_hw_soc.h"
#include "cam_fd_hw_mgr_intf.h"
#include "cam_fd_hw_mgr.h"

static struct cam_fd_hw_mgr g_fd_hw_mgr;

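/*
 * cam_fd_mgr_util_packet_validate() - Basic sanity checks on an incoming
 * packet before it is parsed: generic packet validation, no patches and at
 * least one IO config, a KMD buffer index within the cmd buffer range, an
 * FD_FRAME_UPDATE op_code, and a known meta_data ID on every non-empty
 * cmd buffer descriptor.
 */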
static int cam_fd_mgr_util_packet_validate(struct cam_packet *packet)
{
	struct cam_cmd_buf_desc *cmd_desc = NULL;
	int i, rc;

	if (!packet)
		return -EINVAL;

	CAM_DBG(CAM_FD, "Packet request=%lld, op_code=0x%x, size=%d, flags=%d",
		packet->header.request_id, packet->header.op_code,
		packet->header.size, packet->header.flags);
	CAM_DBG(CAM_FD,
		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
		packet->cmd_buf_offset, packet->num_cmd_buf,
		packet->io_configs_offset, packet->num_io_configs);
	CAM_DBG(CAM_FD,
		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
		packet->patch_offset, packet->num_patches,
		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);

	if (cam_packet_util_validate_packet(packet)) {
		CAM_ERR(CAM_FD, "invalid packet:%d %d %d %d %d",
			packet->kmd_cmd_buf_index,
			packet->num_cmd_buf, packet->cmd_buf_offset,
			packet->io_configs_offset, packet->header.size);
		return -EINVAL;
	}

	/* All buffers must come through io config, patching not supported */
	if (packet->num_patches || !packet->num_io_configs) {
		CAM_ERR(CAM_FD, "wrong number of patch/io info: %u %u",
			packet->num_patches, packet->num_io_configs);
		return -EINVAL;
	}

	/* KMD buf index can never be greater than or equal to num cmd bufs */
	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) {
		CAM_ERR(CAM_FD, "Invalid kmd index %d (%d)",
			packet->kmd_cmd_buf_index, packet->num_cmd_buf);
		return -EINVAL;
	}

	if ((packet->header.op_code & 0xff) !=
		CAM_PACKET_OPCODES_FD_FRAME_UPDATE) {
		CAM_ERR(CAM_FD, "Invalid op_code %u",
			packet->header.op_code & 0xff);
		return -EINVAL;
	}

	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
		packet->cmd_buf_offset);

	for (i = 0; i < packet->num_cmd_buf; i++) {
		/*
		 * A zero-length cmd buffer is allowed; the UMD may pass an
		 * empty cmd buffer in the KMD buffer slot.
		 */
		if (!cmd_desc[i].length)
			continue;

		if ((cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_GENERIC) &&
			(cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)) {
			CAM_ERR(CAM_FD, "Invalid meta_data [%d] %u", i,
				cmd_desc[i].meta_data);
			return -EINVAL;
		}

		CAM_DBG(CAM_FD,
			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
			i,
			cmd_desc[i].mem_handle, cmd_desc[i].offset,
			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
			cmd_desc[i].meta_data);

		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
		if (rc) {
			CAM_ERR(CAM_FD, "Invalid cmd buffer %d", i);
			return rc;
		}
	}

	return 0;
}

static int cam_fd_mgr_util_put_ctx(
	struct list_head *src_list,
	struct cam_fd_hw_mgr_ctx **fd_ctx)
{
	int rc = 0;
	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;

	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
	ctx_ptr = *fd_ctx;
	if (ctx_ptr)
		list_add_tail(&ctx_ptr->list, src_list);
	*fd_ctx = NULL;
	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);

	return rc;
}

static int cam_fd_mgr_util_get_ctx(
	struct list_head *src_list,
	struct cam_fd_hw_mgr_ctx **fd_ctx)
{
	int rc = 0;
	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;

	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
	if (!list_empty(src_list)) {
		ctx_ptr = list_first_entry(src_list,
			struct cam_fd_hw_mgr_ctx, list);
		list_del_init(&ctx_ptr->list);
	} else {
		CAM_ERR(CAM_FD, "No more free fd hw mgr ctx");
		rc = -ENOMEM;
	}
	*fd_ctx = ctx_ptr;
	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);

	return rc;
}

static int cam_fd_mgr_util_put_frame_req(
	struct list_head *src_list,
	struct cam_fd_mgr_frame_request **frame_req)
{
	int rc = 0;
	struct cam_fd_mgr_frame_request *req_ptr = NULL;

	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
	req_ptr = *frame_req;
	if (req_ptr)
		list_add_tail(&req_ptr->list, src_list);
	*frame_req = NULL;
	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);

	return rc;
}

static int cam_fd_mgr_util_get_frame_req(
	struct list_head *src_list,
	struct cam_fd_mgr_frame_request **frame_req)
{
	int rc = 0;
	struct cam_fd_mgr_frame_request *req_ptr = NULL;

	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
	if (!list_empty(src_list)) {
		req_ptr = list_first_entry(src_list,
			struct cam_fd_mgr_frame_request, list);
		list_del_init(&req_ptr->list);
	} else {
		CAM_DBG(CAM_FD, "Frame req not available");
		rc = -EPERM;
	}
	*frame_req = req_ptr;
	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);

	return rc;
}

static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr,
	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_fd_device **hw_device)
{
	if (!hw_mgr || !hw_ctx || !hw_device) {
		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
			hw_device);
		return -EINVAL;
	}

	if ((hw_ctx->device_index < 0) ||
		(hw_ctx->device_index >= CAM_FD_HW_MAX)) {
		CAM_ERR(CAM_FD, "Invalid device index %d",
			hw_ctx->device_index);
		return -EINVAL;
	}

	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
		hw_ctx->device_index);

	*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];

	return 0;
}

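/*
 * cam_fd_mgr_util_release_device() - Release the HW device a context is
 * bound to via hw_ops.release, drop the device's context count and clear
 * the context's device index.
 */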
static int cam_fd_mgr_util_release_device(struct cam_fd_hw_mgr *hw_mgr,
	struct cam_fd_hw_mgr_ctx *hw_ctx)
{
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_release_args hw_release_args;
	int rc;

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	if (hw_device->hw_intf->hw_ops.release) {
		hw_release_args.hw_ctx = hw_ctx;
		hw_release_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		rc = hw_device->hw_intf->hw_ops.release(
			hw_device->hw_intf->hw_priv, &hw_release_args,
			sizeof(hw_release_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW release %d", rc);
			return rc;
		}
	} else {
		CAM_ERR(CAM_FD, "Invalid release function");
	}

	mutex_lock(&hw_mgr->hw_mgr_mutex);
	hw_device->num_ctxts--;
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	hw_ctx->device_index = -1;

	return rc;
}

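/*
 * cam_fd_mgr_util_select_device() - Pick an FD HW device for an acquire.
 * A free device matching the requested mode (and raw results capability,
 * if asked for) is preferred; otherwise a matching device is shared. The
 * selected device is reserved through hw_ops.reserve and its index saved
 * in the context.
 */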
static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr,
	struct cam_fd_hw_mgr_ctx *hw_ctx,
	struct cam_fd_acquire_dev_info *fd_acquire_args)
{
	int i, rc;
	struct cam_fd_hw_reserve_args hw_reserve_args;
	struct cam_fd_device *hw_device = NULL;

	if (!hw_mgr || !hw_ctx || !fd_acquire_args) {
		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
			fd_acquire_args);
		return -EINVAL;
	}

	mutex_lock(&hw_mgr->hw_mgr_mutex);

	/* Check if a device is free which can satisfy the requirements */
	for (i = 0; i < hw_mgr->num_devices; i++) {
		hw_device = &hw_mgr->hw_device[i];
		CAM_DBG(CAM_FD,
			"[%d] : num_ctxts=%d, modes=%d, raw_results=%d",
			i, hw_device->num_ctxts,
			hw_device->hw_caps.supported_modes,
			hw_device->hw_caps.raw_results_available);
		if ((hw_device->num_ctxts == 0) &&
			(fd_acquire_args->mode &
			hw_device->hw_caps.supported_modes) &&
			(!fd_acquire_args->get_raw_results ||
			hw_device->hw_caps.raw_results_available)) {
			CAM_DBG(CAM_FD, "Found dedicated HW Index=%d", i);
			hw_device->num_ctxts++;
			break;
		}
	}

	/*
	 * We couldn't find a free HW which meets the requirements; now check
	 * whether any HW at all meets the acquire requirements
	 */
	if (i == hw_mgr->num_devices) {
		for (i = 0; i < hw_mgr->num_devices; i++) {
			hw_device = &hw_mgr->hw_device[i];
			if ((fd_acquire_args->mode &
				hw_device->hw_caps.supported_modes) &&
				(!fd_acquire_args->get_raw_results ||
				hw_device->hw_caps.raw_results_available)) {
				hw_device->num_ctxts++;
				CAM_DBG(CAM_FD, "Found sharing HW Index=%d", i);
				break;
			}
		}
	}

	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	if ((i == hw_mgr->num_devices) || !hw_device) {
		CAM_ERR(CAM_FD, "Couldn't acquire HW %d %d",
			fd_acquire_args->mode,
			fd_acquire_args->get_raw_results);
		return -EBUSY;
	}

	CAM_DBG(CAM_FD, "Device index %d selected for this acquire", i);

	/* Check if we can reserve this HW */
	if (hw_device->hw_intf->hw_ops.reserve) {
		hw_reserve_args.hw_ctx = hw_ctx;
		hw_reserve_args.mode = fd_acquire_args->mode;
		rc = hw_device->hw_intf->hw_ops.reserve(
			hw_device->hw_intf->hw_priv, &hw_reserve_args,
			sizeof(hw_reserve_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW reserve %d", rc);
			goto put_num_ctxts;
		}
		hw_ctx->ctx_hw_private = hw_reserve_args.ctx_hw_private;
	} else {
		CAM_ERR(CAM_FD, "Invalid reserve function");
		rc = -EPERM;
		goto put_num_ctxts;
	}

	/* Update required info in hw context */
	hw_ctx->device_index = i;

	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
		hw_ctx->device_index);

	return 0;

put_num_ctxts:
	/* Drop the device context count taken during selection above */
	mutex_lock(&hw_mgr->hw_mgr_mutex);
	hw_device->num_ctxts--;
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	return rc;
}

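/*
 * cam_fd_mgr_util_pdev_get_hw_intf() - Look up the i-th "compat-hw-name"
 * entry in the DT node, find the corresponding platform device and return
 * the cam_hw_intf registered as its driver data.
 */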
static int cam_fd_mgr_util_pdev_get_hw_intf(struct device_node *of_node,
	int i, struct cam_hw_intf **device_hw_intf)
{
	struct device_node *device_node = NULL;
	struct platform_device *child_pdev = NULL;
	struct cam_hw_intf *hw_intf = NULL;
	const char *name = NULL;
	int rc;

	rc = of_property_read_string_index(of_node, "compat-hw-name", i, &name);
	if (rc) {
		CAM_ERR(CAM_FD, "Getting dev object name failed %d %d", i, rc);
		goto put_node;
	}

	device_node = of_find_node_by_name(NULL, name);
	if (!device_node) {
		CAM_ERR(CAM_FD, "Cannot find node in dtsi %s", name);
		return -ENODEV;
	}

	child_pdev = of_find_device_by_node(device_node);
	if (!child_pdev) {
		CAM_ERR(CAM_FD, "Failed to find device on bus %s",
			device_node->name);
		rc = -ENODEV;
		goto put_node;
	}

	hw_intf = (struct cam_hw_intf *)platform_get_drvdata(child_pdev);
	if (!hw_intf) {
		CAM_ERR(CAM_FD, "No driver data for child device");
		rc = -ENODEV;
		goto put_node;
	}

	CAM_DBG(CAM_FD, "child type %d index %d child_intf %pK",
		hw_intf->hw_type, hw_intf->hw_idx, hw_intf);

	if (hw_intf->hw_idx >= CAM_FD_HW_MAX) {
		CAM_ERR(CAM_FD, "hw_idx invalid %d", hw_intf->hw_idx);
		rc = -ENODEV;
		goto put_node;
	}

	rc = 0;
	*device_hw_intf = hw_intf;

put_node:
	of_node_put(device_node);

	return rc;
}

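/*
 * cam_fd_packet_generic_blob_handler() - Callback invoked for each cmd
 * blob in a generic cmd buffer. Handles the raw-results-required and SOC
 * clock/BW request blob types; unknown types only trigger a warning.
 */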
static int cam_fd_packet_generic_blob_handler(void *user_data,
	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{
	struct cam_fd_hw_cmd_prestart_args *prestart_args =
		(struct cam_fd_hw_cmd_prestart_args *)user_data;

	if (!blob_data || (blob_size == 0)) {
		CAM_ERR(CAM_FD, "Invalid blob info %pK %d", blob_data,
			blob_size);
		return -EINVAL;
	}

	if (!prestart_args) {
		CAM_ERR(CAM_FD, "Invalid user data");
		return -EINVAL;
	}

	switch (blob_type) {
	case CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED: {
		uint32_t *get_raw_results = (uint32_t *)blob_data;

		if (sizeof(uint32_t) != blob_size) {
			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
				sizeof(uint32_t), blob_size);
			return -EINVAL;
		}

		prestart_args->get_raw_results = *get_raw_results;
		break;
	}
	case CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST: {
		struct cam_fd_soc_clock_bw_request *clk_req =
			(struct cam_fd_soc_clock_bw_request *)blob_data;

		if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
				sizeof(struct cam_fd_soc_clock_bw_request),
				blob_size);
			return -EINVAL;
		}

		CAM_DBG(CAM_FD, "SOC Clk Request clock=%lld, bw=%lld",
			clk_req->clock_rate, clk_req->bandwidth);

		break;
	}
	default:
		CAM_WARN(CAM_FD, "Unknown blob type %d", blob_type);
		break;
	}

	return 0;
}

static int cam_fd_mgr_util_parse_generic_cmd_buffer(
	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_packet *packet,
	struct cam_fd_hw_cmd_prestart_args *prestart_args)
{
	struct cam_cmd_buf_desc *cmd_desc = NULL;
	int i, rc = 0;

	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
		packet->cmd_buf_offset);

	for (i = 0; i < packet->num_cmd_buf; i++) {
		if (!cmd_desc[i].length)
			continue;

		if (cmd_desc[i].meta_data == CAM_FD_CMD_BUFFER_ID_CDM)
			continue;

		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
		if (rc)
			return rc;

		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
			cam_fd_packet_generic_blob_handler, prestart_args);
		if (rc)
			CAM_ERR(CAM_FD, "Failed in processing blobs %d", rc);

		/* Only the first generic cmd buffer is processed */
		break;
	}

	return rc;
}

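/*
 * cam_fd_mgr_util_prepare_io_buf_info() - Walk the packet's IO configs,
 * translate each plane's mem handle into IO and CPU addresses, fill the
 * input/output buffer arrays for the HW layer and set up the in/out sync
 * map entries in the prepare args.
 */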
static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
	struct cam_hw_prepare_update_args *prepare,
	struct cam_fd_hw_io_buffer *input_buf,
	struct cam_fd_hw_io_buffer *output_buf, uint32_t io_buf_size)
{
	int rc = -EINVAL;
	uint32_t i, j, plane, num_out_buf, num_in_buf;
	struct cam_buf_io_cfg *io_cfg;
	uint64_t io_addr[CAM_PACKET_MAX_PLANES];
	uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
	size_t size;

	/* Get IO Buf information */
	num_out_buf = 0;
	num_in_buf = 0;
	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
		&prepare->packet->payload + prepare->packet->io_configs_offset);

	for (i = 0; i < prepare->packet->num_io_configs; i++) {
		CAM_DBG(CAM_FD,
			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
			io_cfg[i].resource_type,
			io_cfg[i].fence, io_cfg[i].format);

		if ((num_in_buf >= io_buf_size) ||
			(num_out_buf >= io_buf_size)) {
			CAM_ERR(CAM_FD, "Invalid number of buffers %d %d %d",
				num_in_buf, num_out_buf, io_buf_size);
			return -EINVAL;
		}

		memset(io_addr, 0x0, sizeof(io_addr));
		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
			if (!io_cfg[i].mem_handle[plane])
				break;

			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
				iommu_hdl, &io_addr[plane], &size);
			if ((rc) || (io_addr[plane] >> 32)) {
				CAM_ERR(CAM_FD, "Invalid io addr for %d %d",
					plane, rc);
				return -ENOMEM;
			}

			/*
			 * Buffers may be accessed by CPU as well, we do not
			 * know at this point, so get both and send to HW layer
			 */
			rc = cam_mem_get_cpu_buf(io_cfg[i].mem_handle[plane],
				&cpu_addr[plane], &size);
			if (rc) {
				CAM_ERR(CAM_FD, "unable to get buf address");
				return rc;
			}

			io_addr[plane] += io_cfg[i].offsets[plane];
			cpu_addr[plane] += io_cfg[i].offsets[plane];

			CAM_DBG(CAM_FD, "IO Address[%d][%d] : 0x%llx, 0x%llx",
				io_cfg[i].direction, plane, io_addr[plane],
				cpu_addr[plane]);
		}

		switch (io_cfg[i].direction) {
		case CAM_BUF_INPUT: {
			prepare->in_map_entries[num_in_buf].resource_handle =
				io_cfg[i].resource_type;
			prepare->in_map_entries[num_in_buf].sync_id =
				io_cfg[i].fence;

			input_buf[num_in_buf].valid = true;
			for (j = 0; j < plane; j++) {
				input_buf[num_in_buf].io_addr[j] = io_addr[j];
				input_buf[num_in_buf].cpu_addr[j] = cpu_addr[j];
			}
			input_buf[num_in_buf].num_buf = plane;
			input_buf[num_in_buf].io_cfg = &io_cfg[i];

			num_in_buf++;
			break;
		}
		case CAM_BUF_OUTPUT: {
			prepare->out_map_entries[num_out_buf].resource_handle =
				io_cfg[i].resource_type;
			prepare->out_map_entries[num_out_buf].sync_id =
				io_cfg[i].fence;

			output_buf[num_out_buf].valid = true;
			for (j = 0; j < plane; j++) {
				output_buf[num_out_buf].io_addr[j] = io_addr[j];
				output_buf[num_out_buf].cpu_addr[j] =
					cpu_addr[j];
			}
			output_buf[num_out_buf].num_buf = plane;
			output_buf[num_out_buf].io_cfg = &io_cfg[i];

			num_out_buf++;
			break;
		}
		default:
			CAM_ERR(CAM_FD, "Unsupported io direction %d",
				io_cfg[i].direction);
			return -EINVAL;
		}
	}

	prepare->num_in_map_entries = num_in_buf;
	prepare->num_out_map_entries = num_out_buf;

	return 0;
}

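/*
 * cam_fd_mgr_util_prepare_hw_update_entries() - Ask the HW layer to
 * generate pre/post config commands into the KMD buffer (CMD_PRESTART),
 * then build the hw_update_entries list as pre-config commands, the
 * packet's CDM cmd buffers and post-config commands, in that order.
 */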
static int cam_fd_mgr_util_prepare_hw_update_entries(
	struct cam_fd_hw_mgr *hw_mgr,
	struct cam_hw_prepare_update_args *prepare,
	struct cam_fd_hw_cmd_prestart_args *prestart_args,
	struct cam_kmd_buf_info *kmd_buf_info)
{
	int i, rc;
	struct cam_hw_update_entry *hw_entry;
	uint32_t num_ent;
	struct cam_fd_hw_mgr_ctx *hw_ctx =
		(struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
	struct cam_fd_device *hw_device;
	uint32_t kmd_buf_max_size, kmd_buf_used_bytes = 0;
	uint32_t *kmd_buf_addr;
	struct cam_cmd_buf_desc *cmd_desc = NULL;

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
		kmd_buf_info->used_bytes);
	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;

	prestart_args->cmd_buf_addr = kmd_buf_addr;
	prestart_args->size = kmd_buf_max_size;
	prestart_args->pre_config_buf_size = 0;
	prestart_args->post_config_buf_size = 0;

	if (hw_device->hw_intf->hw_ops.process_cmd) {
		rc = hw_device->hw_intf->hw_ops.process_cmd(
			hw_device->hw_intf->hw_priv, CAM_FD_HW_CMD_PRESTART,
			prestart_args,
			sizeof(struct cam_fd_hw_cmd_prestart_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", rc);
			return rc;
		}
	}

	kmd_buf_used_bytes += prestart_args->pre_config_buf_size;
	kmd_buf_used_bytes += prestart_args->post_config_buf_size;

	/* The HW layer is expected to add commands */
	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
		CAM_ERR(CAM_FD, "Invalid kmd used bytes %d (%d)",
			kmd_buf_used_bytes, kmd_buf_max_size);
		return -ENOMEM;
	}

	hw_entry = prepare->hw_update_entries;
	num_ent = 0;

	if (prestart_args->pre_config_buf_size) {
		if ((num_ent + 1) >= prepare->max_hw_update_entries) {
			CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d",
				num_ent, prepare->max_hw_update_entries);
			return -EINVAL;
		}

		hw_entry[num_ent].handle = kmd_buf_info->handle;
		hw_entry[num_ent].len = prestart_args->pre_config_buf_size;
		hw_entry[num_ent].offset = kmd_buf_info->offset;

		kmd_buf_info->used_bytes += prestart_args->pre_config_buf_size;
		kmd_buf_info->offset += prestart_args->pre_config_buf_size;
		num_ent++;
	}

	/*
	 * Set cmd_desc to point to the first command descriptor in the
	 * packet and update hw entries with the CDM command buffers
	 */
	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
		&prepare->packet->payload + prepare->packet->cmd_buf_offset);

	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
		if (!cmd_desc[i].length)
			continue;

		if (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)
			continue;

		if (num_ent + 1 >= prepare->max_hw_update_entries) {
			CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d",
				num_ent, prepare->max_hw_update_entries);
			return -EINVAL;
		}

		hw_entry[num_ent].handle = cmd_desc[i].mem_handle;
		hw_entry[num_ent].len = cmd_desc[i].length;
		hw_entry[num_ent].offset = cmd_desc[i].offset;
		num_ent++;
	}

	if (prestart_args->post_config_buf_size) {
		if (num_ent + 1 >= prepare->max_hw_update_entries) {
			CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d",
				num_ent, prepare->max_hw_update_entries);
			return -EINVAL;
		}

		hw_entry[num_ent].handle = kmd_buf_info->handle;
		hw_entry[num_ent].len = prestart_args->post_config_buf_size;
		hw_entry[num_ent].offset = kmd_buf_info->offset;

		kmd_buf_info->used_bytes += prestart_args->post_config_buf_size;
		kmd_buf_info->offset += prestart_args->post_config_buf_size;

		num_ent++;
	}

	prepare->num_hw_update_entries = num_ent;

	CAM_DBG(CAM_FD, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
		prepare->num_hw_update_entries, prepare->num_in_map_entries,
		prepare->num_out_map_entries);

	return rc;
}

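/*
 * cam_fd_mgr_util_submit_frame() - Worker callback that picks the next
 * pending frame (high priority list first), and if the device is free,
 * moves it to the processing list and starts the HW through hw_ops.start.
 */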
static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
{
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_mgr *hw_mgr;
	struct cam_fd_mgr_frame_request *frame_req;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_hw_cmd_start_args start_args;
	int rc;

	if (!priv) {
		CAM_ERR(CAM_FD, "Invalid data");
		return -EINVAL;
	}

	hw_mgr = (struct cam_fd_hw_mgr *)priv;
	mutex_lock(&hw_mgr->frame_req_mutex);

	/* Check if we have any frames pending in high priority list */
	if (!list_empty(&hw_mgr->frame_pending_list_high)) {
		CAM_DBG(CAM_FD, "Pending frames in high priority list");
		frame_req = list_first_entry(&hw_mgr->frame_pending_list_high,
			struct cam_fd_mgr_frame_request, list);
	} else if (!list_empty(&hw_mgr->frame_pending_list_normal)) {
		CAM_DBG(CAM_FD, "Pending frames in normal priority list");
		frame_req = list_first_entry(&hw_mgr->frame_pending_list_normal,
			struct cam_fd_mgr_frame_request, list);
	} else {
		mutex_unlock(&hw_mgr->frame_req_mutex);
		CAM_DBG(CAM_FD, "No pending frames");
		return 0;
	}

	CAM_DBG(CAM_FD, "FrameSubmit : Frame[%lld]", frame_req->request_id);
	hw_ctx = frame_req->hw_ctx;
	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		mutex_unlock(&hw_mgr->frame_req_mutex);
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	mutex_lock(&hw_device->lock);
	if (hw_device->ready_to_process == false) {
		mutex_unlock(&hw_device->lock);
		mutex_unlock(&hw_mgr->frame_req_mutex);
		return -EBUSY;
	}

	list_del_init(&frame_req->list);
	mutex_unlock(&hw_mgr->frame_req_mutex);

	if (hw_device->hw_intf->hw_ops.start) {
		start_args.hw_ctx = hw_ctx;
		start_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		start_args.hw_req_private = &frame_req->hw_req_private;
		start_args.hw_update_entries = frame_req->hw_update_entries;
		start_args.num_hw_update_entries =
			frame_req->num_hw_update_entries;

		rc = hw_device->hw_intf->hw_ops.start(
			hw_device->hw_intf->hw_priv, &start_args,
			sizeof(start_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW Start %d", rc);
			mutex_unlock(&hw_device->lock);
			goto put_req_into_free_list;
		}
	} else {
		CAM_ERR(CAM_FD, "Invalid hw_ops.start");
		mutex_unlock(&hw_device->lock);
		rc = -EPERM;
		goto put_req_into_free_list;
	}

	hw_device->ready_to_process = false;
	mutex_unlock(&hw_device->lock);

	rc = cam_fd_mgr_util_put_frame_req(
		&hw_mgr->frame_processing_list, &frame_req);
	if (rc) {
		CAM_ERR(CAM_FD,
			"Failed in putting frame req in processing list");
		goto stop_hw;
	}

	return rc;

stop_hw:
	if (hw_device->hw_intf->hw_ops.stop) {
		struct cam_fd_hw_stop_args stop_args;

		stop_args.hw_ctx = hw_ctx;
		stop_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		stop_args.hw_req_private = &frame_req->hw_req_private;
		if (hw_device->hw_intf->hw_ops.stop(
			hw_device->hw_intf->hw_priv, &stop_args,
			sizeof(stop_args)))
			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
	}
put_req_into_free_list:
	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, &frame_req);

	return rc;
}

static int cam_fd_mgr_util_schedule_frame_worker_task(
	struct cam_fd_hw_mgr *hw_mgr)
{
	int32_t rc = 0;
	struct crm_workq_task *task;
	struct cam_fd_mgr_work_data *work_data;

	task = cam_req_mgr_workq_get_task(hw_mgr->work);
	if (!task) {
		CAM_ERR(CAM_FD, "no empty task available");
		return -ENOMEM;
	}

	work_data = (struct cam_fd_mgr_work_data *)task->payload;
	work_data->type = CAM_FD_WORK_FRAME;

	task->process_cb = cam_fd_mgr_util_submit_frame;
	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);

	return rc;
}

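/*
 * cam_fd_mgr_workq_irq_cb() - Bottom half for FD HW IRQs. On frame done,
 * reads back results via CMD_FRAME_DONE, notifies the context through
 * event_cb, marks the device ready and submits the next pending frame.
 * HALT IRQs are ignored since a RESET always follows them.
 */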
static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data)
{
	struct cam_fd_device *hw_device = NULL;
	struct cam_fd_hw_mgr *hw_mgr;
	struct cam_fd_mgr_work_data *work_data;
	struct cam_fd_mgr_frame_request *frame_req = NULL;
	enum cam_fd_hw_irq_type irq_type;
	bool frame_abort = true;
	int rc;

	if (!data || !priv) {
		CAM_ERR(CAM_FD, "Invalid data %pK %pK", data, priv);
		return -EINVAL;
	}

	hw_mgr = (struct cam_fd_hw_mgr *)priv;
	work_data = (struct cam_fd_mgr_work_data *)data;
	irq_type = work_data->irq_type;

	CAM_DBG(CAM_FD, "FD IRQ type=%d", irq_type);

	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
		/* HALT is always followed by a RESET, ignore this */
		CAM_DBG(CAM_FD, "HALT IRQ callback");
		return 0;
	}

	/* Get the frame from the processing list */
	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_processing_list,
		&frame_req);
	if (rc || !frame_req) {
		/*
		 * This can happen if a reset is triggered while no frames
		 * were pending, so not an error; just continue to check if
		 * there are any pending frames and submit them
		 */
		CAM_DBG(CAM_FD, "No Frame in processing list, rc=%d", rc);
		goto submit_next_frame;
	}

	if (!frame_req->hw_ctx) {
		CAM_ERR(CAM_FD, "Invalid Frame request %lld",
			frame_req->request_id);
		goto put_req_in_free_list;
	}

	rc = cam_fd_mgr_util_get_device(hw_mgr, frame_req->hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		goto put_req_in_free_list;
	}

	/* Read the frame results first */
	if (irq_type == CAM_FD_IRQ_FRAME_DONE) {
		struct cam_fd_hw_frame_done_args frame_done_args;

		CAM_DBG(CAM_FD, "FrameDone : Frame[%lld]",
			frame_req->request_id);

		frame_done_args.hw_ctx = frame_req->hw_ctx;
		frame_done_args.ctx_hw_private =
			frame_req->hw_ctx->ctx_hw_private;
		frame_done_args.request_id = frame_req->request_id;
		frame_done_args.hw_req_private = &frame_req->hw_req_private;

		if (hw_device->hw_intf->hw_ops.process_cmd) {
			rc = hw_device->hw_intf->hw_ops.process_cmd(
				hw_device->hw_intf->hw_priv,
				CAM_FD_HW_CMD_FRAME_DONE,
				&frame_done_args, sizeof(frame_done_args));
			if (rc) {
				CAM_ERR(CAM_FD, "Failed in CMD_FRAME_DONE %d",
					rc);
				frame_abort = true;
				goto notify_context;
			}
		}

		frame_abort = false;
	}

notify_context:
	/* Do a callback to inform frame done or stop done */
	if (frame_req->hw_ctx->event_cb) {
		struct cam_hw_done_event_data buf_data;

		CAM_DBG(CAM_FD, "FrameHALT : Frame[%lld]",
			frame_req->request_id);

		buf_data.num_handles = frame_req->num_hw_update_entries;
		buf_data.request_id = frame_req->request_id;

		rc = frame_req->hw_ctx->event_cb(frame_req->hw_ctx->cb_priv,
			frame_abort, &buf_data);
		if (rc)
			CAM_ERR(CAM_FD, "Error in event cb handling %d", rc);
	}

	/*
	 * Now we can mark the hw device as free to process further frames.
	 * Note - Do not change the state to IDLE until we read the frame
	 * results, otherwise another thread may schedule frame processing
	 * before the current frame's results are read. Also, we need to set
	 * the IDLE state in case some error happens after getting this irq
	 * callback
	 */
	mutex_lock(&hw_device->lock);
	hw_device->ready_to_process = true;
	CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
	mutex_unlock(&hw_device->lock);

put_req_in_free_list:
	rc = cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
		&frame_req);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in putting frame req in free list");
		/* continue */
	}

submit_next_frame:
	/* Check if there are any frames pending for processing and submit */
	rc = cam_fd_mgr_util_submit_frame(hw_mgr, NULL);
	if (rc) {
		CAM_ERR(CAM_FD, "Error while submitting frame, rc=%d", rc);
		return rc;
	}

	return rc;
}

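/*
 * cam_fd_mgr_irq_cb() - IRQ callback registered with the HW layer; queues
 * a work task so the actual handling runs in cam_fd_mgr_workq_irq_cb.
 */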
static int cam_fd_mgr_irq_cb(void *data, enum cam_fd_hw_irq_type irq_type)
{
	struct cam_fd_hw_mgr *hw_mgr = &g_fd_hw_mgr;
	int rc = 0;
	unsigned long flags;
	struct crm_workq_task *task;
	struct cam_fd_mgr_work_data *work_data;

	spin_lock_irqsave(&hw_mgr->hw_mgr_slock, flags);
	task = cam_req_mgr_workq_get_task(hw_mgr->work);
	if (!task) {
		CAM_ERR(CAM_FD, "no empty task available");
		spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
		return -ENOMEM;
	}

	work_data = (struct cam_fd_mgr_work_data *)task->payload;
	work_data->type = CAM_FD_WORK_IRQ;
	work_data->irq_type = irq_type;

	task->process_cb = cam_fd_mgr_workq_irq_cb;
	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_FD, "Failed in enqueue work task, rc=%d", rc);

	spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);

	return rc;
}

static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
{
	int rc = 0;
	struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
	struct cam_query_cap_cmd *query = hw_get_caps_args;
	struct cam_fd_query_cap_cmd query_fd;

	if (copy_from_user(&query_fd, (void __user *)query->caps_handle,
		sizeof(struct cam_fd_query_cap_cmd))) {
		CAM_ERR(CAM_FD, "Failed in copy from user");
		return -EFAULT;
	}

	query_fd = hw_mgr->fd_caps;

	CAM_DBG(CAM_FD,
		"IOMMU device(%d, %d), CDM(%d, %d), versions %d.%d, %d.%d",
		query_fd.device_iommu.secure, query_fd.device_iommu.non_secure,
		query_fd.cdm_iommu.secure, query_fd.cdm_iommu.non_secure,
		query_fd.hw_caps.core_version.major,
		query_fd.hw_caps.core_version.minor,
		query_fd.hw_caps.wrapper_version.major,
		query_fd.hw_caps.wrapper_version.minor);

	if (copy_to_user((void __user *)query->caps_handle, &query_fd,
		sizeof(struct cam_fd_query_cap_cmd)))
		rc = -EFAULT;

	return rc;
}

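/*
 * cam_fd_mgr_hw_acquire() - Acquire an FD context: copy the acquire info
 * from user space, pull a free hw mgr context, validate the requested mode
 * and raw results support, bind a device to the context and return it via
 * ctxt_to_hw_map.
 */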
static int cam_fd_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
{
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_acquire_args *acquire_args =
		(struct cam_hw_acquire_args *)hw_acquire_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_acquire_dev_info fd_acquire_args;
	int rc;

	if (!acquire_args || acquire_args->num_acq <= 0) {
		CAM_ERR(CAM_FD, "Invalid acquire args %pK", acquire_args);
		return -EINVAL;
	}

	if (copy_from_user(&fd_acquire_args,
		(void __user *)acquire_args->acquire_info,
		sizeof(struct cam_fd_acquire_dev_info))) {
		CAM_ERR(CAM_FD, "Copy from user failed");
		return -EFAULT;
	}

	CAM_DBG(CAM_FD, "Acquire : mode=%d, get_raw_results=%d, priority=%d",
		fd_acquire_args.mode, fd_acquire_args.get_raw_results,
		fd_acquire_args.priority);

	/* Get a free fd hw mgr ctx */
	rc = cam_fd_mgr_util_get_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
	if (rc || !hw_ctx) {
		CAM_ERR(CAM_FD, "Get hw context failed, rc=%d, hw_ctx=%pK",
			rc, hw_ctx);
		return -EINVAL;
	}

	if (fd_acquire_args.get_raw_results && !hw_mgr->raw_results_available) {
		CAM_ERR(CAM_FD, "HW cannot support raw results %d (%d)",
			fd_acquire_args.get_raw_results,
			hw_mgr->raw_results_available);
		rc = -EPERM;
		goto put_ctx;
	}

	if (!(fd_acquire_args.mode & hw_mgr->supported_modes)) {
		CAM_ERR(CAM_FD, "HW cannot support requested mode 0x%x (0x%x)",
			fd_acquire_args.mode, hw_mgr->supported_modes);
		rc = -EPERM;
		goto put_ctx;
	}

	rc = cam_fd_mgr_util_select_device(hw_mgr, hw_ctx, &fd_acquire_args);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in selecting device, rc=%d", rc);
		goto put_ctx;
	}

	hw_ctx->ctx_in_use = true;
	hw_ctx->hw_mgr = hw_mgr;
	hw_ctx->get_raw_results = fd_acquire_args.get_raw_results;
	hw_ctx->mode = fd_acquire_args.mode;
	hw_ctx->priority = fd_acquire_args.priority;

	/* Save incoming cam core info into hw ctx */
	hw_ctx->cb_priv = acquire_args->context_data;
	hw_ctx->event_cb = acquire_args->event_cb;

	/* Update out args */
	acquire_args->ctxt_to_hw_map = hw_ctx;

	cam_fd_mgr_util_put_ctx(&hw_mgr->used_ctx_list, &hw_ctx);

	return 0;
put_ctx:
	list_del_init(&hw_ctx->list);
	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
	return rc;
}

static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
{
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_release_args *release_args = hw_release_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	int rc;

	if (!hw_mgr_priv || !hw_release_args) {
		CAM_ERR(CAM_FD, "Invalid arguments %pK, %pK",
			hw_mgr_priv, hw_release_args);
		return -EINVAL;
	}

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}

	rc = cam_fd_mgr_util_release_device(hw_mgr, hw_ctx);
	if (rc)
		CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);

	hw_ctx->ctx_in_use = false;
	list_del_init(&hw_ctx->list);
	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);

	return 0;
}

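/*
 * cam_fd_mgr_hw_start() - Initialize (power up) the HW device bound to the
 * given context through hw_ops.init.
 */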
static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
{
	int rc = 0;
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_start_args *hw_mgr_start_args =
		(struct cam_hw_start_args *)mgr_start_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_init_args hw_init_args;

	if (!hw_mgr_priv || !hw_mgr_start_args) {
		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
			hw_mgr_priv, hw_mgr_start_args);
		return -EINVAL;
	}

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_start_args->ctxt_to_hw_map;
	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}

	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
		hw_ctx->device_index);

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	if (hw_device->hw_intf->hw_ops.init) {
		hw_init_args.hw_ctx = hw_ctx;
		hw_init_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		rc = hw_device->hw_intf->hw_ops.init(
			hw_device->hw_intf->hw_priv, &hw_init_args,
			sizeof(hw_init_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW Init %d", rc);
			return rc;
		}
	} else {
		CAM_ERR(CAM_FD, "Invalid init function");
		return -EINVAL;
	}

	return rc;
}

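/*
 * cam_fd_mgr_hw_stop() - Stop the HW if this context is currently running
 * on it, then de-initialize the device through hw_ops.deinit.
 */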
static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
{
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_stop_args *hw_mgr_stop_args =
		(struct cam_hw_stop_args *)mgr_stop_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_stop_args hw_stop_args;
	struct cam_fd_hw_deinit_args hw_deinit_args;
	int rc = 0;

	if (!hw_mgr_priv || !hw_mgr_stop_args) {
		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
			hw_mgr_priv, hw_mgr_stop_args);
		return -EINVAL;
	}

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_stop_args->ctxt_to_hw_map;
	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}
	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
		hw_ctx->device_index);

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
		hw_device->ready_to_process);

	if ((hw_device->hw_intf->hw_ops.stop) &&
		(hw_device->ready_to_process == false)) {
		/*
		 * Even if the device is in processing state, we should submit
		 * the stop command only if this ctx is running on the hw
		 */
		hw_stop_args.hw_ctx = hw_ctx;
		rc = hw_device->hw_intf->hw_ops.stop(
			hw_device->hw_intf->hw_priv, &hw_stop_args,
			sizeof(hw_stop_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
			return rc;
		}
	}

	if (hw_device->hw_intf->hw_ops.deinit) {
		hw_deinit_args.hw_ctx = hw_ctx;
		hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		rc = hw_device->hw_intf->hw_ops.deinit(
			hw_device->hw_intf->hw_priv, &hw_deinit_args,
			sizeof(hw_deinit_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW DeInit %d", rc);
			return rc;
		}
	}

	return rc;
}

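/*
 * cam_fd_mgr_hw_prepare_update() - Packet prepare path: validate the
 * packet, parse the generic cmd buffer, set up IO buffers and hw update
 * entries, then stash a frame request in prepare->priv for hw_config.
 */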
static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
	void *hw_prepare_update_args)
{
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_prepare_update_args *prepare =
		(struct cam_hw_prepare_update_args *) hw_prepare_update_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_device *hw_device;
	struct cam_kmd_buf_info kmd_buf;
	int rc;
	struct cam_fd_hw_cmd_prestart_args prestart_args;
	struct cam_fd_mgr_frame_request *frame_req;

	if (!hw_mgr_priv || !hw_prepare_update_args) {
		CAM_ERR(CAM_FD, "Invalid args %pK %pK",
			hw_mgr_priv, hw_prepare_update_args);
		return -EINVAL;
	}

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		goto error;
	}

	rc = cam_fd_mgr_util_packet_validate(prepare->packet);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in packet validation %d", rc);
		goto error;
	}

	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting kmd buffer %d", rc);
		goto error;
	}

	CAM_DBG(CAM_FD,
		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
		kmd_buf.size, kmd_buf.used_bytes);

	/*
	 * We do not expect any patching (num_patches is validated to be 0),
	 * but run it anyway as a safeguard
	 */
	rc = cam_packet_util_process_patches(prepare->packet,
		hw_mgr->device_iommu.non_secure);
	if (rc) {
		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
		goto error;
	}

	memset(&prestart_args, 0x0, sizeof(prestart_args));
	prestart_args.ctx_hw_private = hw_ctx->ctx_hw_private;
	prestart_args.hw_ctx = hw_ctx;
	prestart_args.request_id = prepare->packet->header.request_id;

	rc = cam_fd_mgr_util_parse_generic_cmd_buffer(hw_ctx, prepare->packet,
		&prestart_args);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in parsing generic cmd buffer %d", rc);
		goto error;
	}

	rc = cam_fd_mgr_util_prepare_io_buf_info(
		hw_mgr->device_iommu.non_secure, prepare,
		prestart_args.input_buf, prestart_args.output_buf,
		CAM_FD_MAX_IO_BUFFERS);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in prepare IO Buf %d", rc);
		goto error;
	}

	rc = cam_fd_mgr_util_prepare_hw_update_entries(hw_mgr, prepare,
		&prestart_args, &kmd_buf);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in hw update entries %d", rc);
		goto error;
	}

	/* Get a free frame req from the free list */
	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
		&frame_req);
	if (rc || !frame_req) {
		CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK",
			rc, hw_ctx);
		return -ENOMEM;
	}

	/* Setup frame request info and queue to pending list */
	frame_req->hw_ctx = hw_ctx;
	frame_req->request_id = prepare->packet->header.request_id;
	/* This has to be passed to HW while calling hw_ops->start */
	frame_req->hw_req_private = prestart_args.hw_req_private;

	/*
	 * Save the current frame_req into priv,
	 * it will come back as priv in hw_config
	 */
	prepare->priv = frame_req;

	CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id);

	return 0;
error:
	return rc;
}

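/*
 * cam_fd_mgr_hw_config() - Config path: copy the hw update entries into
 * the frame request prepared earlier, queue it on the priority-appropriate
 * pending list and schedule the worker to submit it.
 */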
static int cam_fd_mgr_hw_config(void *hw_mgr_priv, void *hw_config_args)
{
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_config_args *config =
		(struct cam_hw_config_args *) hw_config_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_mgr_frame_request *frame_req;
	int rc;
	int i;

	if (!hw_mgr || !config) {
		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", hw_mgr, config);
		return -EINVAL;
	}

	if (!config->num_hw_update_entries) {
		CAM_ERR(CAM_FD, "No hw update entries are available");
		return -EINVAL;
	}

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)config->ctxt_to_hw_map;
	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}

	frame_req = config->priv;
	CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id);

	frame_req->num_hw_update_entries = config->num_hw_update_entries;
	for (i = 0; i < config->num_hw_update_entries; i++) {
		frame_req->hw_update_entries[i] = config->hw_update_entries[i];
		CAM_DBG(CAM_FD, "PreStart HWEntry[%d] : %d %d %d %d %pK",
			i, frame_req->hw_update_entries[i].handle,
			frame_req->hw_update_entries[i].offset,
			frame_req->hw_update_entries[i].len,
			frame_req->hw_update_entries[i].flags,
			frame_req->hw_update_entries[i].addr);
	}

	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
		CAM_DBG(CAM_FD, "Insert frame into prio0 queue");
		rc = cam_fd_mgr_util_put_frame_req(
			&hw_mgr->frame_pending_list_high, &frame_req);
	} else {
		CAM_DBG(CAM_FD, "Insert frame into prio1 queue");
		rc = cam_fd_mgr_util_put_frame_req(
			&hw_mgr->frame_pending_list_normal, &frame_req);
	}
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in queuing frame req, rc=%d", rc);
		goto put_free_list;
	}

	rc = cam_fd_mgr_util_schedule_frame_worker_task(hw_mgr);
	if (rc) {
		CAM_ERR(CAM_FD, "Worker task scheduling failed %d", rc);
		goto remove_and_put_free_list;
	}

	return 0;

remove_and_put_free_list:

	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
		CAM_DBG(CAM_FD, "Removing frame from prio0 queue");
		cam_fd_mgr_util_get_frame_req(
			&hw_mgr->frame_pending_list_high, &frame_req);
	} else {
		CAM_DBG(CAM_FD, "Removing frame from prio1 queue");
		cam_fd_mgr_util_get_frame_req(
			&hw_mgr->frame_pending_list_normal, &frame_req);
	}
put_free_list:
	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
		&frame_req);

	return rc;
}

int cam_fd_hw_mgr_deinit(struct device_node *of_node)
{
	CAM_DBG(CAM_FD, "HW Mgr Deinit");

	cam_req_mgr_workq_destroy(&g_fd_hw_mgr.work);

	cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
	g_fd_hw_mgr.device_iommu.non_secure = -1;

	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);

	return 0;
}

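/*
 * cam_fd_hw_mgr_init() - One-time init of the FD HW manager: discover HW
 * devices from DT, register IRQ callbacks, read capabilities, set up SMMU
 * handles, context/frame-request pools and the worker queue, and populate
 * the hw_mgr_intf ops.
 */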
int cam_fd_hw_mgr_init(struct device_node *of_node,
	struct cam_hw_mgr_intf *hw_mgr_intf)
{
	int count, i, rc = 0;
	struct cam_hw_intf *hw_intf = NULL;
	struct cam_fd_hw_mgr_ctx *hw_mgr_ctx;
	struct cam_fd_device *hw_device;
	struct cam_fd_mgr_frame_request *frame_req;

	if (!of_node || !hw_mgr_intf) {
		CAM_ERR(CAM_FD, "Invalid args of_node %pK hw_mgr_intf %pK",
			of_node, hw_mgr_intf);
		return -EINVAL;
	}

	memset(&g_fd_hw_mgr, 0x0, sizeof(g_fd_hw_mgr));
	memset(hw_mgr_intf, 0x0, sizeof(*hw_mgr_intf));

	mutex_init(&g_fd_hw_mgr.ctx_mutex);
	mutex_init(&g_fd_hw_mgr.frame_req_mutex);
	mutex_init(&g_fd_hw_mgr.hw_mgr_mutex);
	spin_lock_init(&g_fd_hw_mgr.hw_mgr_slock);

	count = of_property_count_strings(of_node, "compat-hw-name");
	if ((count <= 0) || (count > CAM_FD_HW_MAX)) {
		CAM_ERR(CAM_FD, "Invalid compat names in dev tree %d", count);
		return -EINVAL;
	}
	g_fd_hw_mgr.num_devices = count;

	g_fd_hw_mgr.raw_results_available = false;
	g_fd_hw_mgr.supported_modes = 0;

	for (i = 0; i < count; i++) {
		hw_device = &g_fd_hw_mgr.hw_device[i];

		rc = cam_fd_mgr_util_pdev_get_hw_intf(of_node, i, &hw_intf);
		if (rc) {
			CAM_ERR(CAM_FD, "hw intf from pdev failed, rc=%d", rc);
			return rc;
		}

		mutex_init(&hw_device->lock);

		hw_device->valid = true;
		hw_device->hw_intf = hw_intf;
		hw_device->ready_to_process = true;

		if (hw_device->hw_intf->hw_ops.process_cmd) {
			struct cam_fd_hw_cmd_set_irq_cb irq_cb_args;

			irq_cb_args.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb;
			irq_cb_args.data = hw_device;

			rc = hw_device->hw_intf->hw_ops.process_cmd(
				hw_device->hw_intf->hw_priv,
				CAM_FD_HW_CMD_REGISTER_CALLBACK,
				&irq_cb_args, sizeof(irq_cb_args));
			if (rc) {
				CAM_ERR(CAM_FD,
					"Failed in REGISTER_CALLBACK %d", rc);
				return rc;
			}
		}

		if (hw_device->hw_intf->hw_ops.get_hw_caps) {
			rc = hw_device->hw_intf->hw_ops.get_hw_caps(
				hw_intf->hw_priv, &hw_device->hw_caps,
				sizeof(hw_device->hw_caps));
			if (rc) {
				CAM_ERR(CAM_FD, "Failed in get_hw_caps %d", rc);
				return rc;
			}

			g_fd_hw_mgr.raw_results_available |=
				hw_device->hw_caps.raw_results_available;
			g_fd_hw_mgr.supported_modes |=
				hw_device->hw_caps.supported_modes;

			CAM_DBG(CAM_FD,
				"Device[mode=%d, raw=%d], Mgr[mode=%d, raw=%d]",
				hw_device->hw_caps.supported_modes,
				hw_device->hw_caps.raw_results_available,
				g_fd_hw_mgr.supported_modes,
				g_fd_hw_mgr.raw_results_available);
		}
	}

	INIT_LIST_HEAD(&g_fd_hw_mgr.free_ctx_list);
	INIT_LIST_HEAD(&g_fd_hw_mgr.used_ctx_list);
	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_free_list);
	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_high);
	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_normal);
	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_processing_list);

	g_fd_hw_mgr.device_iommu.non_secure = -1;
	g_fd_hw_mgr.device_iommu.secure = -1;
	g_fd_hw_mgr.cdm_iommu.non_secure = -1;
	g_fd_hw_mgr.cdm_iommu.secure = -1;

	rc = cam_smmu_get_handle("fd",
		&g_fd_hw_mgr.device_iommu.non_secure);
	if (rc) {
		CAM_ERR(CAM_FD, "Get iommu handle failed, rc=%d", rc);
		goto destroy_mutex;
	}

	rc = cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_ATTACH);
	if (rc) {
		CAM_ERR(CAM_FD, "FD attach iommu handle failed, rc=%d", rc);
		goto destroy_smmu;
	}

	rc = cam_cdm_get_iommu_handle("fd", &g_fd_hw_mgr.cdm_iommu);
	if (rc)
		CAM_DBG(CAM_FD, "Failed to acquire the CDM iommu handles");

	CAM_DBG(CAM_FD, "iommu handles : device(%d, %d), cdm(%d, %d)",
		g_fd_hw_mgr.device_iommu.non_secure,
		g_fd_hw_mgr.device_iommu.secure,
		g_fd_hw_mgr.cdm_iommu.non_secure,
		g_fd_hw_mgr.cdm_iommu.secure);

	/* Init hw mgr contexts and add to free list */
	for (i = 0; i < CAM_CTX_MAX; i++) {
		hw_mgr_ctx = &g_fd_hw_mgr.ctx_pool[i];

		memset(hw_mgr_ctx, 0x0, sizeof(*hw_mgr_ctx));
		INIT_LIST_HEAD(&hw_mgr_ctx->list);

		hw_mgr_ctx->ctx_index = i;
		hw_mgr_ctx->device_index = -1;
		hw_mgr_ctx->hw_mgr = &g_fd_hw_mgr;

		list_add_tail(&hw_mgr_ctx->list, &g_fd_hw_mgr.free_ctx_list);
	}

	/* Init hw mgr frame requests and add to free list */
	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
		frame_req = &g_fd_hw_mgr.frame_req[i];

		memset(frame_req, 0x0, sizeof(*frame_req));
		INIT_LIST_HEAD(&frame_req->list);

		list_add_tail(&frame_req->list, &g_fd_hw_mgr.frame_free_list);
	}

	rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK,
		&g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ);
	if (rc) {
		CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc);
		goto detach_smmu;
	}

	for (i = 0; i < CAM_FD_WORKQ_NUM_TASK; i++)
		g_fd_hw_mgr.work->task.pool[i].payload =
			&g_fd_hw_mgr.work_data[i];

	/* Set up hw caps so that we can just return the info when requested */
	memset(&g_fd_hw_mgr.fd_caps, 0, sizeof(g_fd_hw_mgr.fd_caps));
	g_fd_hw_mgr.fd_caps.device_iommu = g_fd_hw_mgr.device_iommu;
	g_fd_hw_mgr.fd_caps.cdm_iommu = g_fd_hw_mgr.cdm_iommu;
	g_fd_hw_mgr.fd_caps.hw_caps = g_fd_hw_mgr.hw_device[0].hw_caps;

	CAM_DBG(CAM_FD,
		"IOMMU device(%d, %d), CDM(%d, %d) versions core[%d.%d], wrapper[%d.%d]",
		g_fd_hw_mgr.fd_caps.device_iommu.secure,
		g_fd_hw_mgr.fd_caps.device_iommu.non_secure,
		g_fd_hw_mgr.fd_caps.cdm_iommu.secure,
		g_fd_hw_mgr.fd_caps.cdm_iommu.non_secure,
		g_fd_hw_mgr.fd_caps.hw_caps.core_version.major,
		g_fd_hw_mgr.fd_caps.hw_caps.core_version.minor,
		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.major,
		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.minor);

	hw_mgr_intf->hw_mgr_priv = &g_fd_hw_mgr;
	hw_mgr_intf->hw_get_caps = cam_fd_mgr_hw_get_caps;
	hw_mgr_intf->hw_acquire = cam_fd_mgr_hw_acquire;
	hw_mgr_intf->hw_release = cam_fd_mgr_hw_release;
	hw_mgr_intf->hw_start = cam_fd_mgr_hw_start;
	hw_mgr_intf->hw_stop = cam_fd_mgr_hw_stop;
	hw_mgr_intf->hw_prepare_update = cam_fd_mgr_hw_prepare_update;
	hw_mgr_intf->hw_config = cam_fd_mgr_hw_config;
	hw_mgr_intf->hw_read = NULL;
	hw_mgr_intf->hw_write = NULL;
	hw_mgr_intf->hw_close = NULL;

	return rc;

detach_smmu:
	cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
destroy_smmu:
	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
	g_fd_hw_mgr.device_iommu.non_secure = -1;
destroy_mutex:
	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);

	return rc;
}