| /* Copyright (c) 2017, The Linux Foundation. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 and |
| * only version 2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| */ |
| |
| #include <linux/slab.h> |
| #include <linux/uaccess.h> |
| #include "cam_context.h" |
| #include "cam_debug_util.h" |
| |
| static int cam_context_handle_hw_event(void *context, uint32_t evt_id, |
| void *evt_data) |
| { |
| int rc = 0; |
| struct cam_context *ctx = (struct cam_context *)context; |
| |
| if (!ctx || !ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (ctx->state_machine[ctx->state].irq_ops) |
| rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id, |
| evt_data); |
| else |
| CAM_DBG(CAM_CORE, |
| "No function to handle event %d in dev %d, state %d", |
| evt_id, ctx->dev_hdl, ctx->state); |
| return rc; |
| } |
| |
| int cam_context_handle_crm_get_dev_info(struct cam_context *ctx, |
| struct cam_req_mgr_device_info *info) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!info) { |
| CAM_ERR(CAM_CORE, "Invalid get device info payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) { |
| rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info( |
| ctx, info); |
| } else { |
| CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_crm_link(struct cam_context *ctx, |
| struct cam_req_mgr_core_dev_link_setup *link) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!link) { |
| CAM_ERR(CAM_CORE, "Invalid link payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].crm_ops.link) { |
| rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link); |
| } else { |
| CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_crm_unlink(struct cam_context *ctx, |
| struct cam_req_mgr_core_dev_link_setup *unlink) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!unlink) { |
| CAM_ERR(CAM_CORE, "Invalid unlink payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].crm_ops.unlink) { |
| rc = ctx->state_machine[ctx->state].crm_ops.unlink( |
| ctx, unlink); |
| } else { |
| CAM_ERR(CAM_CORE, "No crm unlink in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_crm_apply_req(struct cam_context *ctx, |
| struct cam_req_mgr_apply_request *apply) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!apply) { |
| CAM_ERR(CAM_CORE, "Invalid apply request payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].crm_ops.apply_req) { |
| rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx, |
| apply); |
| } else { |
| CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_crm_flush_req(struct cam_context *ctx, |
| struct cam_req_mgr_flush_request *flush) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].crm_ops.flush_req) { |
| rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx, |
| flush); |
| } else { |
| CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_acquire_dev(struct cam_context *ctx, |
| struct cam_acquire_dev_cmd *cmd) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!cmd) { |
| CAM_ERR(CAM_CORE, "Invalid acquire device command payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) { |
| rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev( |
| ctx, cmd); |
| } else { |
| CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d", |
| cmd->dev_handle, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_release_dev(struct cam_context *ctx, |
| struct cam_release_dev_cmd *cmd) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!cmd) { |
| CAM_ERR(CAM_CORE, "Invalid release device command payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) { |
| rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev( |
| ctx, cmd); |
| } else { |
| CAM_ERR(CAM_CORE, "No release device in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_config_dev(struct cam_context *ctx, |
| struct cam_config_dev_cmd *cmd) |
| { |
| int rc; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!cmd) { |
| CAM_ERR(CAM_CORE, "Invalid config device command payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) { |
| rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev( |
| ctx, cmd); |
| } else { |
| CAM_ERR(CAM_CORE, "No config device in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| rc = -EPROTO; |
| } |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_start_dev(struct cam_context *ctx, |
| struct cam_start_stop_dev_cmd *cmd) |
| { |
| int rc = 0; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!cmd) { |
| CAM_ERR(CAM_CORE, "Invalid start device command payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].ioctl_ops.start_dev) |
| rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev( |
| ctx, cmd); |
| else |
| /* start device can be optional for some driver */ |
| CAM_DBG(CAM_CORE, "No start device in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_handle_stop_dev(struct cam_context *ctx, |
| struct cam_start_stop_dev_cmd *cmd) |
| { |
| int rc = 0; |
| |
| if (!ctx->state_machine) { |
| CAM_ERR(CAM_CORE, "Context is not ready"); |
| return -EINVAL; |
| } |
| |
| if (!cmd) { |
| CAM_ERR(CAM_CORE, "Invalid stop device command payload"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&ctx->ctx_mutex); |
| if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev) |
| rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev( |
| ctx, cmd); |
| else |
| /* stop device can be optional for some driver */ |
| CAM_WARN(CAM_CORE, "No stop device in dev %d, state %d", |
| ctx->dev_hdl, ctx->state); |
| mutex_unlock(&ctx->ctx_mutex); |
| |
| return rc; |
| } |
| |
| int cam_context_init(struct cam_context *ctx, |
| struct cam_req_mgr_kmd_ops *crm_node_intf, |
| struct cam_hw_mgr_intf *hw_mgr_intf, |
| struct cam_ctx_request *req_list, |
| uint32_t req_size) |
| { |
| int i; |
| |
| /* crm_node_intf is optinal */ |
| if (!ctx || !hw_mgr_intf || !req_list) { |
| CAM_ERR(CAM_CORE, "Invalid input parameters"); |
| return -EINVAL; |
| } |
| |
| memset(ctx, 0, sizeof(*ctx)); |
| |
| INIT_LIST_HEAD(&ctx->list); |
| mutex_init(&ctx->ctx_mutex); |
| spin_lock_init(&ctx->lock); |
| |
| ctx->ctx_crm_intf = NULL; |
| ctx->crm_ctx_intf = crm_node_intf; |
| ctx->hw_mgr_intf = hw_mgr_intf; |
| ctx->irq_cb_intf = cam_context_handle_hw_event; |
| |
| INIT_LIST_HEAD(&ctx->active_req_list); |
| INIT_LIST_HEAD(&ctx->wait_req_list); |
| INIT_LIST_HEAD(&ctx->pending_req_list); |
| INIT_LIST_HEAD(&ctx->free_req_list); |
| ctx->req_list = req_list; |
| ctx->req_size = req_size; |
| for (i = 0; i < req_size; i++) { |
| INIT_LIST_HEAD(&ctx->req_list[i].list); |
| list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list); |
| } |
| ctx->state = CAM_CTX_AVAILABLE; |
| ctx->state_machine = NULL; |
| ctx->ctx_priv = NULL; |
| |
| return 0; |
| } |
| |
/*
 * Tear down a cam_context at platform device remove time.
 *
 * Everything should already be released at this point, so the struct is
 * simply zeroed (no memory is freed here — the caller owns the storage,
 * see cam_context_init). A context not back in CAM_CTX_AVAILABLE means
 * the device did not shut down cleanly; this is logged but deinit
 * proceeds anyway.
 *
 * Returns 0 on success, -EINVAL if ctx is NULL.
 */
int cam_context_deinit(struct cam_context *ctx)
{
	if (!ctx)
		return -EINVAL;

	/**
	 * This is called from platform device remove.
	 * Everything should be released at this moment,
	 * so we just clear the context structure.
	 */
	if (ctx->state != CAM_CTX_AVAILABLE)
		CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");

	memset(ctx, 0, sizeof(*ctx));

	return 0;
}