blob: d5ad17dfc24ddc1eafbce0c212a607c5598cbc1c [file] [log] [blame]
/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and
 * supports image scaler/rotator and input/output DMA operations
 * using FIMC, GSC, Rotator, and so on.
 * IPP is an integration device driver for hardware with the same attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. removed send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. need to check subdrv_open about multi-open.
 * 7. need to power_on implement power and sysmmu ctrl.
 */

/* fetch the per-device ipp_context stored via platform_set_drvdata() */
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
/* parenthesize the macro argument so any expression expands safely */
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;
50
Eunchul Kimcb471f142012-12-14 18:10:31 +090051/*
52 * A structure of event.
53 *
54 * @base: base of event.
55 * @event: ipp event.
56 */
57struct drm_exynos_ipp_send_event {
58 struct drm_pending_event base;
59 struct drm_exynos_ipp_event event;
60};
61
62/*
63 * A structure of memory node.
64 *
65 * @list: list head to memory queue information.
66 * @ops_id: id of operations.
67 * @prop_id: id of property.
68 * @buf_id: id of buffer.
69 * @buf_info: gem objects and dma address, size.
70 * @filp: a pointer to drm_file.
71 */
72struct drm_exynos_ipp_mem_node {
73 struct list_head list;
74 enum drm_exynos_ops_id ops_id;
75 u32 prop_id;
76 u32 buf_id;
77 struct drm_exynos_ipp_buf_info buf_info;
Eunchul Kimcb471f142012-12-14 18:10:31 +090078};
79
80/*
81 * A structure of ipp context.
82 *
83 * @subdrv: prepare initialization using subdrv.
84 * @ipp_lock: lock for synchronization of access to ipp_idr.
85 * @prop_lock: lock for synchronization of access to prop_idr.
86 * @ipp_idr: ipp driver idr.
87 * @prop_idr: property idr.
88 * @event_workq: event work queue.
89 * @cmd_workq: command work queue.
90 */
91struct ipp_context {
92 struct exynos_drm_subdrv subdrv;
93 struct mutex ipp_lock;
94 struct mutex prop_lock;
95 struct idr ipp_idr;
96 struct idr prop_idr;
97 struct workqueue_struct *event_workq;
98 struct workqueue_struct *cmd_workq;
99};
100
/* global list of registered ipp drivers, protected by the mutex below */
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
/* notifier chain for broadcasting ipp events */
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104
Seung-Woo Kim43f41902013-04-23 14:02:53 +0900105int exynos_platform_device_ipp_register(void)
106{
107 struct platform_device *pdev;
108
109 if (exynos_drm_ipp_pdev)
110 return -EEXIST;
111
112 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
113 if (IS_ERR(pdev))
114 return PTR_ERR(pdev);
115
116 exynos_drm_ipp_pdev = pdev;
117
118 return 0;
119}
120
121void exynos_platform_device_ipp_unregister(void)
122{
123 if (exynos_drm_ipp_pdev) {
124 platform_device_unregister(exynos_drm_ipp_pdev);
125 exynos_drm_ipp_pdev = NULL;
126 }
127}
128
Eunchul Kimcb471f142012-12-14 18:10:31 +0900129int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
130{
Eunchul Kimcb471f142012-12-14 18:10:31 +0900131 mutex_lock(&exynos_drm_ippdrv_lock);
132 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
133 mutex_unlock(&exynos_drm_ippdrv_lock);
134
135 return 0;
136}
137
138int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
139{
Eunchul Kimcb471f142012-12-14 18:10:31 +0900140 mutex_lock(&exynos_drm_ippdrv_lock);
141 list_del(&ippdrv->drv_list);
142 mutex_unlock(&exynos_drm_ippdrv_lock);
143
144 return 0;
145}
146
Andrzej Hajda12ff54d2014-07-03 15:10:36 +0200147static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900148{
149 int ret;
150
Eunchul Kimcb471f142012-12-14 18:10:31 +0900151 mutex_lock(lock);
Tejun Heo8550cb22013-02-27 17:04:09 -0800152 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900153 mutex_unlock(lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900154
Andrzej Hajda12ff54d2014-07-03 15:10:36 +0200155 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900156}
157
YoungJun Cho075436b2014-05-26 10:17:19 +0200158static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
159{
160 mutex_lock(lock);
161 idr_remove(id_idr, id);
162 mutex_unlock(lock);
163}
164
Eunchul Kimcb471f142012-12-14 18:10:31 +0900165static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
166{
167 void *obj;
168
Eunchul Kimcb471f142012-12-14 18:10:31 +0900169 mutex_lock(lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900170 obj = idr_find(id_idr, id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900171 mutex_unlock(lock);
172
173 return obj;
174}
175
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200176static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
177 struct drm_exynos_ipp_property *property)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900178{
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200179 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
180 !pm_runtime_suspended(ippdrv->dev)))
181 return -EBUSY;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900182
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200183 if (ippdrv->check_property &&
184 ippdrv->check_property(ippdrv->dev, property))
185 return -EINVAL;
186
187 return 0;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900188}
189
190static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
191 struct drm_exynos_ipp_property *property)
192{
193 struct exynos_drm_ippdrv *ippdrv;
194 u32 ipp_id = property->ipp_id;
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200195 int ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900196
197 if (ipp_id) {
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200198 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
Andrzej Hajda134f0e92014-07-03 15:10:34 +0200199 if (!ippdrv) {
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200200 DRM_DEBUG("ipp%d driver not found\n", ipp_id);
Andrzej Hajda134f0e92014-07-03 15:10:34 +0200201 return ERR_PTR(-ENODEV);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900202 }
203
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200204 ret = ipp_check_driver(ippdrv, property);
205 if (ret < 0) {
206 DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
207 return ERR_PTR(ret);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900208 }
209
210 return ippdrv;
211 } else {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900212 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200213 ret = ipp_check_driver(ippdrv, property);
214 if (ret == 0)
215 return ippdrv;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900216 }
217
Andrzej Hajda9cc7d852014-07-03 15:10:37 +0200218 DRM_DEBUG("cannot find driver suitable for given property.\n");
Eunchul Kimcb471f142012-12-14 18:10:31 +0900219 }
220
221 return ERR_PTR(-ENODEV);
222}
223
224static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
225{
226 struct exynos_drm_ippdrv *ippdrv;
227 struct drm_exynos_ipp_cmd_node *c_node;
228 int count = 0;
229
YoungJun Chocbc4c332013-06-12 10:44:40 +0900230 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900231
Eunchul Kimcb471f142012-12-14 18:10:31 +0900232 /*
233 * This case is search ipp driver by prop_id handle.
234 * sometimes, ipp subsystem find driver by prop_id.
Geert Uytterhoeven9fca9ac2014-03-11 11:23:37 +0100235 * e.g PAUSE state, queue buf, command control.
Eunchul Kimcb471f142012-12-14 18:10:31 +0900236 */
237 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900238 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900239
YoungJun Cho7f5af052014-05-26 10:17:18 +0200240 mutex_lock(&ippdrv->cmd_lock);
241 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
242 if (c_node->property.prop_id == prop_id) {
243 mutex_unlock(&ippdrv->cmd_lock);
YoungJun Choc66ce402014-05-26 10:17:15 +0200244 return ippdrv;
YoungJun Cho7f5af052014-05-26 10:17:18 +0200245 }
Eunchul Kimcb471f142012-12-14 18:10:31 +0900246 }
YoungJun Cho7f5af052014-05-26 10:17:18 +0200247 mutex_unlock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900248 }
249
250 return ERR_PTR(-ENODEV);
251}
252
253int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
254 struct drm_file *file)
255{
256 struct drm_exynos_file_private *file_priv = file->driver_priv;
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +0200257 struct device *dev = file_priv->ipp_dev;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900258 struct ipp_context *ctx = get_ipp_context(dev);
259 struct drm_exynos_ipp_prop_list *prop_list = data;
260 struct exynos_drm_ippdrv *ippdrv;
261 int count = 0;
262
Eunchul Kimcb471f142012-12-14 18:10:31 +0900263 if (!ctx) {
264 DRM_ERROR("invalid context.\n");
265 return -EINVAL;
266 }
267
268 if (!prop_list) {
269 DRM_ERROR("invalid property parameter.\n");
270 return -EINVAL;
271 }
272
YoungJun Chocbc4c332013-06-12 10:44:40 +0900273 DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900274
275 if (!prop_list->ipp_id) {
276 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
277 count++;
YoungJun Cho7f5af052014-05-26 10:17:18 +0200278
Eunchul Kimcb471f142012-12-14 18:10:31 +0900279 /*
280 * Supports ippdrv list count for user application.
281 * First step user application getting ippdrv count.
282 * and second step getting ippdrv capability using ipp_id.
283 */
284 prop_list->count = count;
285 } else {
286 /*
287 * Getting ippdrv capability by ipp_id.
Masanari Iidac6b78bc2013-10-24 16:02:57 +0900288 * some device not supported wb, output interface.
Eunchul Kimcb471f142012-12-14 18:10:31 +0900289 * so, user application detect correct ipp driver
290 * using this ioctl.
291 */
292 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
293 prop_list->ipp_id);
Andrzej Hajda134f0e92014-07-03 15:10:34 +0200294 if (!ippdrv) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900295 DRM_ERROR("not found ipp%d driver.\n",
296 prop_list->ipp_id);
Andrzej Hajda134f0e92014-07-03 15:10:34 +0200297 return -ENODEV;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900298 }
299
Andrzej Hajda31646052014-05-19 12:54:05 +0200300 *prop_list = ippdrv->prop_list;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900301 }
302
303 return 0;
304}
305
306static void ipp_print_property(struct drm_exynos_ipp_property *property,
307 int idx)
308{
309 struct drm_exynos_ipp_config *config = &property->config[idx];
310 struct drm_exynos_pos *pos = &config->pos;
311 struct drm_exynos_sz *sz = &config->sz;
312
YoungJun Chocbc4c332013-06-12 10:44:40 +0900313 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
314 property->prop_id, idx ? "dst" : "src", config->fmt);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900315
YoungJun Chocbc4c332013-06-12 10:44:40 +0900316 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
317 pos->x, pos->y, pos->w, pos->h,
Eunchul Kimcb471f142012-12-14 18:10:31 +0900318 sz->hsize, sz->vsize, config->flip, config->degree);
319}
320
Eunchul Kimcb471f142012-12-14 18:10:31 +0900321static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
322{
323 struct drm_exynos_ipp_cmd_work *cmd_work;
324
Eunchul Kimcb471f142012-12-14 18:10:31 +0900325 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900326 if (!cmd_work)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900327 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900328
329 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
330
331 return cmd_work;
332}
333
334static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
335{
336 struct drm_exynos_ipp_event_work *event_work;
337
Eunchul Kimcb471f142012-12-14 18:10:31 +0900338 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900339 if (!event_work)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900340 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900341
Andrzej Hajda60b61c22014-07-03 15:10:26 +0200342 INIT_WORK(&event_work->work, ipp_sched_event);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900343
344 return event_work;
345}
346
347int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
348 struct drm_file *file)
349{
350 struct drm_exynos_file_private *file_priv = file->driver_priv;
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +0200351 struct device *dev = file_priv->ipp_dev;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900352 struct ipp_context *ctx = get_ipp_context(dev);
353 struct drm_exynos_ipp_property *property = data;
354 struct exynos_drm_ippdrv *ippdrv;
355 struct drm_exynos_ipp_cmd_node *c_node;
Andrzej Hajda18383cb2014-09-02 14:56:21 +0200356 u32 prop_id;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900357 int ret, i;
358
Eunchul Kimcb471f142012-12-14 18:10:31 +0900359 if (!ctx) {
360 DRM_ERROR("invalid context.\n");
361 return -EINVAL;
362 }
363
364 if (!property) {
365 DRM_ERROR("invalid property parameter.\n");
366 return -EINVAL;
367 }
368
Andrzej Hajda18383cb2014-09-02 14:56:21 +0200369 prop_id = property->prop_id;
370
Eunchul Kimcb471f142012-12-14 18:10:31 +0900371 /*
372 * This is log print for user application property.
373 * user application set various property.
374 */
375 for_each_ipp_ops(i)
376 ipp_print_property(property, i);
377
378 /*
Andrzej Hajda18383cb2014-09-02 14:56:21 +0200379 * In case prop_id is not zero try to set existing property.
Eunchul Kimcb471f142012-12-14 18:10:31 +0900380 */
Andrzej Hajda18383cb2014-09-02 14:56:21 +0200381 if (prop_id) {
382 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
383
384 if (!c_node || c_node->filp != file) {
385 DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
386 return -EINVAL;
387 }
388
389 if (c_node->state != IPP_STATE_STOP) {
390 DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
391 return -EINVAL;
392 }
393
394 c_node->property = *property;
395
396 return 0;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900397 }
398
399 /* find ipp driver using ipp id */
400 ippdrv = ipp_find_driver(ctx, property);
Sachin Kamatf0250452013-04-29 12:27:06 +0530401 if (IS_ERR(ippdrv)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900402 DRM_ERROR("failed to get ipp driver.\n");
403 return -EINVAL;
404 }
405
406 /* allocate command node */
407 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900408 if (!c_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900409 return -ENOMEM;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900410
Andrzej Hajda12ff54d2014-07-03 15:10:36 +0200411 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
412 if (ret < 0) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900413 DRM_ERROR("failed to create id.\n");
414 goto err_clear;
415 }
Andrzej Hajda12ff54d2014-07-03 15:10:36 +0200416 property->prop_id = ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900417
YoungJun Chocbc4c332013-06-12 10:44:40 +0900418 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
419 property->prop_id, property->cmd, (int)ippdrv);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900420
421 /* stored property information and ippdrv in private data */
Eunchul Kimcb471f142012-12-14 18:10:31 +0900422 c_node->property = *property;
423 c_node->state = IPP_STATE_IDLE;
Andrzej Hajda945a0aa2014-08-28 11:07:27 +0200424 c_node->filp = file;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900425
426 c_node->start_work = ipp_create_cmd_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530427 if (IS_ERR(c_node->start_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900428 DRM_ERROR("failed to create start work.\n");
Julia Lawallbe19d932014-11-23 14:11:15 +0100429 ret = PTR_ERR(c_node->start_work);
YoungJun Cho075436b2014-05-26 10:17:19 +0200430 goto err_remove_id;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900431 }
432
433 c_node->stop_work = ipp_create_cmd_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530434 if (IS_ERR(c_node->stop_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900435 DRM_ERROR("failed to create stop work.\n");
Julia Lawallbe19d932014-11-23 14:11:15 +0100436 ret = PTR_ERR(c_node->stop_work);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900437 goto err_free_start;
438 }
439
440 c_node->event_work = ipp_create_event_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530441 if (IS_ERR(c_node->event_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900442 DRM_ERROR("failed to create event work.\n");
Julia Lawallbe19d932014-11-23 14:11:15 +0100443 ret = PTR_ERR(c_node->event_work);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900444 goto err_free_stop;
445 }
446
YoungJun Cho4e4fe552014-05-26 10:17:17 +0200447 mutex_init(&c_node->lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900448 mutex_init(&c_node->mem_lock);
449 mutex_init(&c_node->event_lock);
450
451 init_completion(&c_node->start_complete);
452 init_completion(&c_node->stop_complete);
453
454 for_each_ipp_ops(i)
455 INIT_LIST_HEAD(&c_node->mem_list[i]);
456
457 INIT_LIST_HEAD(&c_node->event_list);
YoungJun Cho7f5af052014-05-26 10:17:18 +0200458 mutex_lock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900459 list_add_tail(&c_node->list, &ippdrv->cmd_list);
YoungJun Cho7f5af052014-05-26 10:17:18 +0200460 mutex_unlock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900461
462 /* make dedicated state without m2m */
463 if (!ipp_is_m2m_cmd(property->cmd))
464 ippdrv->dedicated = true;
465
466 return 0;
467
468err_free_stop:
469 kfree(c_node->stop_work);
470err_free_start:
471 kfree(c_node->start_work);
YoungJun Cho075436b2014-05-26 10:17:19 +0200472err_remove_id:
473 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900474err_clear:
475 kfree(c_node);
476 return ret;
477}
478
Andrzej Hajdac4a856a2014-08-28 11:07:31 +0200479static int ipp_put_mem_node(struct drm_device *drm_dev,
480 struct drm_exynos_ipp_cmd_node *c_node,
481 struct drm_exynos_ipp_mem_node *m_node)
482{
483 int i;
484
485 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
486
487 if (!m_node) {
488 DRM_ERROR("invalid dequeue node.\n");
489 return -EFAULT;
490 }
491
492 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
493
494 /* put gem buffer */
495 for_each_ipp_planar(i) {
496 unsigned long handle = m_node->buf_info.handles[i];
497 if (handle)
498 exynos_drm_gem_put_dma_addr(drm_dev, handle,
499 c_node->filp);
500 }
501
502 list_del(&m_node->list);
503 kfree(m_node);
504
505 return 0;
506}
507
508static struct drm_exynos_ipp_mem_node
509 *ipp_get_mem_node(struct drm_device *drm_dev,
Andrzej Hajdac4a856a2014-08-28 11:07:31 +0200510 struct drm_exynos_ipp_cmd_node *c_node,
511 struct drm_exynos_ipp_queue_buf *qbuf)
512{
513 struct drm_exynos_ipp_mem_node *m_node;
514 struct drm_exynos_ipp_buf_info *buf_info;
515 int i;
516
517 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
518 if (!m_node)
519 return ERR_PTR(-ENOMEM);
520
521 buf_info = &m_node->buf_info;
522
523 /* operations, buffer id */
524 m_node->ops_id = qbuf->ops_id;
525 m_node->prop_id = qbuf->prop_id;
526 m_node->buf_id = qbuf->buf_id;
527 INIT_LIST_HEAD(&m_node->list);
528
529 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
530 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
531
532 for_each_ipp_planar(i) {
533 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
534
535 /* get dma address by handle */
536 if (qbuf->handle[i]) {
537 dma_addr_t *addr;
538
539 addr = exynos_drm_gem_get_dma_addr(drm_dev,
Andrzej Hajdad9b97342014-09-02 14:55:06 +0200540 qbuf->handle[i], c_node->filp);
Andrzej Hajdac4a856a2014-08-28 11:07:31 +0200541 if (IS_ERR(addr)) {
542 DRM_ERROR("failed to get addr.\n");
543 ipp_put_mem_node(drm_dev, c_node, m_node);
544 return ERR_PTR(-EFAULT);
545 }
546
547 buf_info->handles[i] = qbuf->handle[i];
548 buf_info->base[i] = *addr;
549 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
550 buf_info->base[i], buf_info->handles[i]);
551 }
552 }
553
554 mutex_lock(&c_node->mem_lock);
555 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
556 mutex_unlock(&c_node->mem_lock);
557
558 return m_node;
559}
560
561static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
562 struct drm_exynos_ipp_cmd_node *c_node, int ops)
563{
564 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
565 struct list_head *head = &c_node->mem_list[ops];
566
567 mutex_lock(&c_node->mem_lock);
568
569 list_for_each_entry_safe(m_node, tm_node, head, list) {
570 int ret;
571
572 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
573 if (ret)
574 DRM_ERROR("failed to put m_node.\n");
575 }
576
577 mutex_unlock(&c_node->mem_lock);
578}
579
/* drm_pending_event destroy callback: the event is a plain kzalloc. */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
584
585static int ipp_get_event(struct drm_device *drm_dev,
Andrzej Hajdac0592c82014-08-28 11:07:35 +0200586 struct drm_exynos_ipp_cmd_node *c_node,
587 struct drm_exynos_ipp_queue_buf *qbuf)
588{
589 struct drm_exynos_ipp_send_event *e;
590 unsigned long flags;
591
592 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
593
594 e = kzalloc(sizeof(*e), GFP_KERNEL);
595 if (!e) {
596 spin_lock_irqsave(&drm_dev->event_lock, flags);
Andrzej Hajdad9b97342014-09-02 14:55:06 +0200597 c_node->filp->event_space += sizeof(e->event);
Andrzej Hajdac0592c82014-08-28 11:07:35 +0200598 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
599 return -ENOMEM;
600 }
601
602 /* make event */
603 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
604 e->event.base.length = sizeof(e->event);
605 e->event.user_data = qbuf->user_data;
606 e->event.prop_id = qbuf->prop_id;
607 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
608 e->base.event = &e->event.base;
Andrzej Hajdad9b97342014-09-02 14:55:06 +0200609 e->base.file_priv = c_node->filp;
Andrzej Hajdac0592c82014-08-28 11:07:35 +0200610 e->base.destroy = ipp_free_event;
611 mutex_lock(&c_node->event_lock);
612 list_add_tail(&e->base.link, &c_node->event_list);
613 mutex_unlock(&c_node->event_lock);
614
615 return 0;
616}
617
618static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
619 struct drm_exynos_ipp_queue_buf *qbuf)
620{
621 struct drm_exynos_ipp_send_event *e, *te;
622 int count = 0;
623
624 mutex_lock(&c_node->event_lock);
625 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
626 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
627
628 /*
629 * qbuf == NULL condition means all event deletion.
630 * stop operations want to delete all event list.
631 * another case delete only same buf id.
632 */
633 if (!qbuf) {
634 /* delete list */
635 list_del(&e->base.link);
636 kfree(e);
637 }
638
639 /* compare buffer id */
640 if (qbuf && (qbuf->buf_id ==
641 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
642 /* delete list */
643 list_del(&e->base.link);
644 kfree(e);
645 goto out_unlock;
646 }
647 }
648
649out_unlock:
650 mutex_unlock(&c_node->event_lock);
651 return;
652}
653
YoungJun Cho075436b2014-05-26 10:17:19 +0200654static void ipp_clean_cmd_node(struct ipp_context *ctx,
655 struct drm_exynos_ipp_cmd_node *c_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900656{
Andrzej Hajda22e816f2014-08-28 11:07:32 +0200657 int i;
658
Andrzej Hajda6f7d48e2014-08-28 11:07:26 +0200659 /* cancel works */
660 cancel_work_sync(&c_node->start_work->work);
661 cancel_work_sync(&c_node->stop_work->work);
662 cancel_work_sync(&c_node->event_work->work);
663
Andrzej Hajdac0592c82014-08-28 11:07:35 +0200664 /* put event */
665 ipp_put_event(c_node, NULL);
666
Andrzej Hajda22e816f2014-08-28 11:07:32 +0200667 for_each_ipp_ops(i)
668 ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
669
Eunchul Kimcb471f142012-12-14 18:10:31 +0900670 /* delete list */
671 list_del(&c_node->list);
672
YoungJun Cho075436b2014-05-26 10:17:19 +0200673 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
674 c_node->property.prop_id);
675
Eunchul Kimcb471f142012-12-14 18:10:31 +0900676 /* destroy mutex */
YoungJun Cho4e4fe552014-05-26 10:17:17 +0200677 mutex_destroy(&c_node->lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900678 mutex_destroy(&c_node->mem_lock);
679 mutex_destroy(&c_node->event_lock);
680
681 /* free command node */
682 kfree(c_node->start_work);
683 kfree(c_node->stop_work);
684 kfree(c_node->event_work);
685 kfree(c_node);
686}
687
Andrzej Hajdafb5ee012014-07-03 15:10:32 +0200688static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900689{
Andrzej Hajdafb5ee012014-07-03 15:10:32 +0200690 switch (c_node->property.cmd) {
691 case IPP_CMD_WB:
692 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
693 case IPP_CMD_OUTPUT:
694 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
695 case IPP_CMD_M2M:
696 default:
697 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
698 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900699 }
Eunchul Kimcb471f142012-12-14 18:10:31 +0900700}
701
702static struct drm_exynos_ipp_mem_node
703 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
704 struct drm_exynos_ipp_queue_buf *qbuf)
705{
706 struct drm_exynos_ipp_mem_node *m_node;
707 struct list_head *head;
708 int count = 0;
709
YoungJun Chocbc4c332013-06-12 10:44:40 +0900710 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900711
712 /* source/destination memory list */
713 head = &c_node->mem_list[qbuf->ops_id];
714
715 /* find memory node from memory list */
716 list_for_each_entry(m_node, head, list) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900717 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900718
719 /* compare buffer id */
720 if (m_node->buf_id == qbuf->buf_id)
721 return m_node;
722 }
723
724 return NULL;
725}
726
727static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
728 struct drm_exynos_ipp_cmd_node *c_node,
729 struct drm_exynos_ipp_mem_node *m_node)
730{
731 struct exynos_drm_ipp_ops *ops = NULL;
732 int ret = 0;
733
YoungJun Chocbc4c332013-06-12 10:44:40 +0900734 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900735
736 if (!m_node) {
737 DRM_ERROR("invalid queue node.\n");
738 return -EFAULT;
739 }
740
YoungJun Chocbc4c332013-06-12 10:44:40 +0900741 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900742
743 /* get operations callback */
744 ops = ippdrv->ops[m_node->ops_id];
745 if (!ops) {
746 DRM_ERROR("not support ops.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +0200747 return -EFAULT;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900748 }
749
750 /* set address and enable irq */
751 if (ops->set_addr) {
752 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
753 m_node->buf_id, IPP_BUF_ENQUEUE);
754 if (ret) {
755 DRM_ERROR("failed to set addr.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +0200756 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900757 }
758 }
759
Eunchul Kimcb471f142012-12-14 18:10:31 +0900760 return ret;
761}
762
Sachin Kamat0bc4a0a2013-01-14 12:29:10 +0530763static void ipp_handle_cmd_work(struct device *dev,
Eunchul Kimcb471f142012-12-14 18:10:31 +0900764 struct exynos_drm_ippdrv *ippdrv,
765 struct drm_exynos_ipp_cmd_work *cmd_work,
766 struct drm_exynos_ipp_cmd_node *c_node)
767{
768 struct ipp_context *ctx = get_ipp_context(dev);
769
770 cmd_work->ippdrv = ippdrv;
771 cmd_work->c_node = c_node;
Andrzej Hajda05afb1a2014-08-28 11:07:33 +0200772 queue_work(ctx->cmd_workq, &cmd_work->work);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900773}
774
775static int ipp_queue_buf_with_run(struct device *dev,
776 struct drm_exynos_ipp_cmd_node *c_node,
777 struct drm_exynos_ipp_mem_node *m_node,
778 struct drm_exynos_ipp_queue_buf *qbuf)
779{
780 struct exynos_drm_ippdrv *ippdrv;
781 struct drm_exynos_ipp_property *property;
782 struct exynos_drm_ipp_ops *ops;
783 int ret;
784
Eunchul Kimcb471f142012-12-14 18:10:31 +0900785 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
Sachin Kamatf0250452013-04-29 12:27:06 +0530786 if (IS_ERR(ippdrv)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900787 DRM_ERROR("failed to get ipp driver.\n");
788 return -EFAULT;
789 }
790
791 ops = ippdrv->ops[qbuf->ops_id];
792 if (!ops) {
793 DRM_ERROR("failed to get ops.\n");
794 return -EFAULT;
795 }
796
797 property = &c_node->property;
798
799 if (c_node->state != IPP_STATE_START) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900800 DRM_DEBUG_KMS("bypass for invalid state.\n");
Eunchul Kimcb471f142012-12-14 18:10:31 +0900801 return 0;
802 }
803
YoungJun Cho220db6f2014-05-26 10:17:20 +0200804 mutex_lock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900805 if (!ipp_check_mem_list(c_node)) {
YoungJun Cho220db6f2014-05-26 10:17:20 +0200806 mutex_unlock(&c_node->mem_lock);
YoungJun Chocbc4c332013-06-12 10:44:40 +0900807 DRM_DEBUG_KMS("empty memory.\n");
Eunchul Kimcb471f142012-12-14 18:10:31 +0900808 return 0;
809 }
810
811 /*
812 * If set destination buffer and enabled clock,
813 * then m2m operations need start operations at queue_buf
814 */
815 if (ipp_is_m2m_cmd(property->cmd)) {
816 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
817
818 cmd_work->ctrl = IPP_CTRL_PLAY;
819 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
820 } else {
821 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
822 if (ret) {
YoungJun Cho220db6f2014-05-26 10:17:20 +0200823 mutex_unlock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900824 DRM_ERROR("failed to set m node.\n");
825 return ret;
826 }
827 }
YoungJun Cho220db6f2014-05-26 10:17:20 +0200828 mutex_unlock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900829
830 return 0;
831}
832
833static void ipp_clean_queue_buf(struct drm_device *drm_dev,
834 struct drm_exynos_ipp_cmd_node *c_node,
835 struct drm_exynos_ipp_queue_buf *qbuf)
836{
837 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
838
YoungJun Choc66ce402014-05-26 10:17:15 +0200839 /* delete list */
YoungJun Cho220db6f2014-05-26 10:17:20 +0200840 mutex_lock(&c_node->mem_lock);
YoungJun Choc66ce402014-05-26 10:17:15 +0200841 list_for_each_entry_safe(m_node, tm_node,
842 &c_node->mem_list[qbuf->ops_id], list) {
843 if (m_node->buf_id == qbuf->buf_id &&
844 m_node->ops_id == qbuf->ops_id)
845 ipp_put_mem_node(drm_dev, c_node, m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900846 }
YoungJun Cho220db6f2014-05-26 10:17:20 +0200847 mutex_unlock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900848}
849
/*
 * exynos_drm_ipp_queue_buf - IPP_QUEUE_BUF ioctl handler.
 *
 * Enqueues or dequeues one source/destination buffer on the command node
 * identified by qbuf->prop_id.  On enqueue of a destination buffer an
 * event is prepared for completion delivery and, via
 * ipp_queue_buf_with_run(), an M2M command is kicked off.  On dequeue the
 * pending event and the matching queued memory nodes are released.
 *
 * Returns 0 on success or a negative error code.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	/* ops_id indexes mem_list[]; reject out-of-range values early */
	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node; it must belong to the calling file */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * first step get event for destination buffer.
		 * and second step when M2M case run with destination buffer
		 * if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case run play control for streaming feature.
			 * other case set address and waiting.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		/* serialize against the command worker (ipp_sched_cmd) */
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	/* undo the enqueue so no stale node is left on the mem list */
	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
941
942static bool exynos_drm_ipp_check_valid(struct device *dev,
943 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
944{
Eunchul Kimcb471f142012-12-14 18:10:31 +0900945 if (ctrl != IPP_CTRL_PLAY) {
946 if (pm_runtime_suspended(dev)) {
947 DRM_ERROR("pm:runtime_suspended.\n");
948 goto err_status;
949 }
950 }
951
952 switch (ctrl) {
953 case IPP_CTRL_PLAY:
954 if (state != IPP_STATE_IDLE)
955 goto err_status;
956 break;
957 case IPP_CTRL_STOP:
958 if (state == IPP_STATE_STOP)
959 goto err_status;
960 break;
961 case IPP_CTRL_PAUSE:
962 if (state != IPP_STATE_START)
963 goto err_status;
964 break;
965 case IPP_CTRL_RESUME:
966 if (state != IPP_STATE_STOP)
967 goto err_status;
968 break;
969 default:
970 DRM_ERROR("invalid state.\n");
971 goto err_status;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900972 }
973
974 return true;
975
976err_status:
977 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
978 return false;
979}
980
/*
 * exynos_drm_ipp_cmd_ctrl - IPP_CMD_CTRL ioctl handler.
 *
 * Validates the requested control (play/stop/pause/resume) against the
 * command node's current state, then hands the request to the command
 * worker via ipp_handle_cmd_work().  STOP and PAUSE block (with a
 * timeout) on stop_complete until the worker acknowledges before the
 * node state is updated; STOP additionally tears the command node down.
 *
 * Returns 0 on success or a negative error code.
 */
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	/* the node must exist and belong to the calling file */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		/* power the block up before queueing the start work */
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		/* wait for the worker to signal; 300 ms grace period */
		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		/* last command gone: drop the runtime-PM reference */
		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
1084
1085int exynos_drm_ippnb_register(struct notifier_block *nb)
1086{
1087 return blocking_notifier_chain_register(
1088 &exynos_drm_ippnb_list, nb);
1089}
1090
1091int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1092{
1093 return blocking_notifier_chain_unregister(
1094 &exynos_drm_ippnb_list, nb);
1095}
1096
1097int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1098{
1099 return blocking_notifier_call_chain(
1100 &exynos_drm_ippnb_list, val, v);
1101}
1102
1103static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1104 struct drm_exynos_ipp_property *property)
1105{
1106 struct exynos_drm_ipp_ops *ops = NULL;
1107 bool swap = false;
1108 int ret, i;
1109
1110 if (!property) {
1111 DRM_ERROR("invalid property parameter.\n");
1112 return -EINVAL;
1113 }
1114
YoungJun Chocbc4c332013-06-12 10:44:40 +09001115 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001116
1117 /* reset h/w block */
1118 if (ippdrv->reset &&
1119 ippdrv->reset(ippdrv->dev)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +09001120 return -EINVAL;
1121 }
1122
1123 /* set source,destination operations */
1124 for_each_ipp_ops(i) {
1125 struct drm_exynos_ipp_config *config =
1126 &property->config[i];
1127
1128 ops = ippdrv->ops[i];
1129 if (!ops || !config) {
1130 DRM_ERROR("not support ops and config.\n");
1131 return -EINVAL;
1132 }
1133
1134 /* set format */
1135 if (ops->set_fmt) {
1136 ret = ops->set_fmt(ippdrv->dev, config->fmt);
Andrzej Hajda57ace332014-07-03 15:10:35 +02001137 if (ret)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001138 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001139 }
1140
1141 /* set transform for rotation, flip */
1142 if (ops->set_transf) {
1143 ret = ops->set_transf(ippdrv->dev, config->degree,
1144 config->flip, &swap);
Andrzej Hajda57ace332014-07-03 15:10:35 +02001145 if (ret)
1146 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001147 }
1148
1149 /* set size */
1150 if (ops->set_size) {
1151 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1152 &config->sz);
Andrzej Hajda57ace332014-07-03 15:10:35 +02001153 if (ret)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001154 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001155 }
1156 }
1157
1158 return 0;
1159}
1160
/*
 * ipp_start_property - begin execution of a queued command.
 *
 * Programs the driver with the node's property, feeds the queued memory
 * nodes to the hardware according to the command type, and finally calls
 * the driver's start callback.  Called from the command worker
 * (ipp_sched_cmd) with c_node->lock held.
 *
 * Returns 0 on success or a negative error code; on every failure path
 * ippdrv->c_node is cleared again.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	/* hold mem_lock across the whole list consumption below */
	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M consumes the head node of both src and dst lists */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* writeback: every queued destination buffer is set */
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* output: every queued source buffer is set */
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
1258
1259static int ipp_stop_property(struct drm_device *drm_dev,
1260 struct exynos_drm_ippdrv *ippdrv,
1261 struct drm_exynos_ipp_cmd_node *c_node)
1262{
Eunchul Kimcb471f142012-12-14 18:10:31 +09001263 struct drm_exynos_ipp_property *property = &c_node->property;
Andrzej Hajda8aa99dd2014-08-28 11:07:34 +02001264 int i;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001265
YoungJun Chocbc4c332013-06-12 10:44:40 +09001266 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001267
Andrzej Hajda8aa99dd2014-08-28 11:07:34 +02001268 /* stop operations */
1269 if (ippdrv->stop)
1270 ippdrv->stop(ippdrv->dev, property->cmd);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001271
1272 /* check command */
1273 switch (property->cmd) {
1274 case IPP_CMD_M2M:
Andrzej Hajdac4a856a2014-08-28 11:07:31 +02001275 for_each_ipp_ops(i)
1276 ipp_clean_mem_nodes(drm_dev, c_node, i);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001277 break;
1278 case IPP_CMD_WB:
Andrzej Hajdac4a856a2014-08-28 11:07:31 +02001279 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001280 break;
1281 case IPP_CMD_OUTPUT:
Andrzej Hajdac4a856a2014-08-28 11:07:31 +02001282 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001283 break;
1284 default:
1285 DRM_ERROR("invalid operations.\n");
Andrzej Hajda8aa99dd2014-08-28 11:07:34 +02001286 return -EINVAL;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001287 }
1288
Andrzej Hajda8aa99dd2014-08-28 11:07:34 +02001289 return 0;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001290}
1291
/*
 * ipp_sched_cmd - command workqueue handler.
 *
 * Executes a queued PLAY/RESUME or STOP/PAUSE request under
 * c_node->lock.  For M2M starts it waits (200 ms timeout) for the
 * transfer completion signalled by the event worker; for stops it
 * signals stop_complete so the blocked ioctl path can continue.
 */
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
			container_of(work, struct drm_exynos_ipp_cmd_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	/* serializes against queue/dequeue ioctls on the same node */
	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		/* unblock exynos_drm_ipp_cmd_ctrl() waiting on this */
		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
1363
1364static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1365 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1366{
1367 struct drm_device *drm_dev = ippdrv->drm_dev;
1368 struct drm_exynos_ipp_property *property = &c_node->property;
1369 struct drm_exynos_ipp_mem_node *m_node;
1370 struct drm_exynos_ipp_queue_buf qbuf;
1371 struct drm_exynos_ipp_send_event *e;
1372 struct list_head *head;
1373 struct timeval now;
1374 unsigned long flags;
1375 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1376 int ret, i;
1377
1378 for_each_ipp_ops(i)
YoungJun Chocbc4c332013-06-12 10:44:40 +09001379 DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001380
1381 if (!drm_dev) {
1382 DRM_ERROR("failed to get drm_dev.\n");
1383 return -EINVAL;
1384 }
1385
1386 if (!property) {
1387 DRM_ERROR("failed to get property.\n");
1388 return -EINVAL;
1389 }
1390
YoungJun Cho4d520762014-05-26 10:17:21 +02001391 mutex_lock(&c_node->event_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001392 if (list_empty(&c_node->event_list)) {
YoungJun Chocbc4c332013-06-12 10:44:40 +09001393 DRM_DEBUG_KMS("event list is empty.\n");
YoungJun Cho4d520762014-05-26 10:17:21 +02001394 ret = 0;
1395 goto err_event_unlock;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001396 }
1397
YoungJun Cho220db6f2014-05-26 10:17:20 +02001398 mutex_lock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001399 if (!ipp_check_mem_list(c_node)) {
YoungJun Chocbc4c332013-06-12 10:44:40 +09001400 DRM_DEBUG_KMS("empty memory.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +02001401 ret = 0;
1402 goto err_mem_unlock;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001403 }
1404
1405 /* check command */
1406 switch (property->cmd) {
1407 case IPP_CMD_M2M:
1408 for_each_ipp_ops(i) {
1409 /* source/destination memory list */
1410 head = &c_node->mem_list[i];
1411
1412 m_node = list_first_entry(head,
1413 struct drm_exynos_ipp_mem_node, list);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001414
1415 tbuf_id[i] = m_node->buf_id;
YoungJun Chocbc4c332013-06-12 10:44:40 +09001416 DRM_DEBUG_KMS("%s buf_id[%d]\n",
Eunchul Kimcb471f142012-12-14 18:10:31 +09001417 i ? "dst" : "src", tbuf_id[i]);
1418
1419 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1420 if (ret)
1421 DRM_ERROR("failed to put m_node.\n");
1422 }
1423 break;
1424 case IPP_CMD_WB:
1425 /* clear buf for finding */
1426 memset(&qbuf, 0x0, sizeof(qbuf));
1427 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1428 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1429
1430 /* get memory node entry */
1431 m_node = ipp_find_mem_node(c_node, &qbuf);
1432 if (!m_node) {
1433 DRM_ERROR("empty memory node.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +02001434 ret = -ENOMEM;
1435 goto err_mem_unlock;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001436 }
1437
1438 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1439
1440 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1441 if (ret)
1442 DRM_ERROR("failed to put m_node.\n");
1443 break;
1444 case IPP_CMD_OUTPUT:
1445 /* source memory list */
1446 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1447
1448 m_node = list_first_entry(head,
1449 struct drm_exynos_ipp_mem_node, list);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001450
1451 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1452
1453 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1454 if (ret)
1455 DRM_ERROR("failed to put m_node.\n");
1456 break;
1457 default:
1458 DRM_ERROR("invalid operations.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +02001459 ret = -EINVAL;
1460 goto err_mem_unlock;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001461 }
YoungJun Cho220db6f2014-05-26 10:17:20 +02001462 mutex_unlock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001463
1464 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1465 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1466 tbuf_id[1], buf_id[1], property->prop_id);
1467
1468 /*
1469 * command node have event list of destination buffer
1470 * If destination buffer enqueue to mem list,
1471 * then we make event and link to event list tail.
1472 * so, we get first event for first enqueued buffer.
1473 */
1474 e = list_first_entry(&c_node->event_list,
1475 struct drm_exynos_ipp_send_event, base.link);
1476
Eunchul Kimcb471f142012-12-14 18:10:31 +09001477 do_gettimeofday(&now);
YoungJun Chocbc4c332013-06-12 10:44:40 +09001478 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001479 e->event.tv_sec = now.tv_sec;
1480 e->event.tv_usec = now.tv_usec;
1481 e->event.prop_id = property->prop_id;
1482
1483 /* set buffer id about source destination */
1484 for_each_ipp_ops(i)
1485 e->event.buf_id[i] = tbuf_id[i];
1486
1487 spin_lock_irqsave(&drm_dev->event_lock, flags);
1488 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1489 wake_up_interruptible(&e->base.file_priv->event_wait);
1490 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
YoungJun Cho4d520762014-05-26 10:17:21 +02001491 mutex_unlock(&c_node->event_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001492
YoungJun Chocbc4c332013-06-12 10:44:40 +09001493 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
Eunchul Kimcb471f142012-12-14 18:10:31 +09001494 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1495
1496 return 0;
YoungJun Cho220db6f2014-05-26 10:17:20 +02001497
1498err_mem_unlock:
1499 mutex_unlock(&c_node->mem_lock);
YoungJun Cho4d520762014-05-26 10:17:21 +02001500err_event_unlock:
1501 mutex_unlock(&c_node->event_lock);
YoungJun Cho220db6f2014-05-26 10:17:20 +02001502 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001503}
1504
1505void ipp_sched_event(struct work_struct *work)
1506{
1507 struct drm_exynos_ipp_event_work *event_work =
Andrzej Hajda05afb1a2014-08-28 11:07:33 +02001508 container_of(work, struct drm_exynos_ipp_event_work, work);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001509 struct exynos_drm_ippdrv *ippdrv;
1510 struct drm_exynos_ipp_cmd_node *c_node;
1511 int ret;
1512
1513 if (!event_work) {
1514 DRM_ERROR("failed to get event_work.\n");
1515 return;
1516 }
1517
YoungJun Chocbc4c332013-06-12 10:44:40 +09001518 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001519
1520 ippdrv = event_work->ippdrv;
1521 if (!ippdrv) {
1522 DRM_ERROR("failed to get ipp driver.\n");
1523 return;
1524 }
1525
Eunchul Kim7259c3d2012-12-22 17:49:22 +09001526 c_node = ippdrv->c_node;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001527 if (!c_node) {
1528 DRM_ERROR("failed to get command node.\n");
1529 return;
1530 }
1531
1532 /*
1533 * IPP supports command thread, event thread synchronization.
1534 * If IPP close immediately from user land, then IPP make
1535 * synchronization with command thread, so make complete event.
1536 * or going out operations.
1537 */
1538 if (c_node->state != IPP_STATE_START) {
YoungJun Chocbc4c332013-06-12 10:44:40 +09001539 DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
1540 c_node->state, c_node->property.prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001541 goto err_completion;
1542 }
1543
Eunchul Kimcb471f142012-12-14 18:10:31 +09001544 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1545 if (ret) {
1546 DRM_ERROR("failed to send event.\n");
1547 goto err_completion;
1548 }
1549
1550err_completion:
1551 if (ipp_is_m2m_cmd(c_node->property.cmd))
1552 complete(&c_node->start_complete);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001553}
1554
/*
 * ipp_subdrv_probe - bind every registered ippdrv to the drm device.
 *
 * For each driver on exynos_drm_ippdrv_list: allocate an ipp id, hook
 * up the shared event workqueue and handler, initialise its command
 * list/lock and attach the iommu.  On failure, every driver processed
 * so far is unwound in reverse order.
 *
 * Returns 0 on success or a negative error code.
 */
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		/* ipp_create_id() returns the new id on success */
		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* unwind only the drivers handled before the failing one */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
		drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}
1608
1609static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1610{
Andrzej Hajdaa36ed462014-09-09 15:16:05 +02001611 struct exynos_drm_ippdrv *ippdrv, *t;
YoungJun Cho075436b2014-05-26 10:17:19 +02001612 struct ipp_context *ctx = get_ipp_context(dev);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001613
Eunchul Kimcb471f142012-12-14 18:10:31 +09001614 /* get ipp driver entry */
Andrzej Hajdaa36ed462014-09-09 15:16:05 +02001615 list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
Eunchul Kimc12e2612012-12-14 17:58:54 +09001616 if (is_drm_iommu_supported(drm_dev))
1617 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1618
YoungJun Cho075436b2014-05-26 10:17:19 +02001619 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1620 ippdrv->prop_list.ipp_id);
1621
Eunchul Kimcb471f142012-12-14 18:10:31 +09001622 ippdrv->drm_dev = NULL;
1623 exynos_drm_ippdrv_unregister(ippdrv);
1624 }
1625}
1626
1627static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1628 struct drm_file *file)
1629{
1630 struct drm_exynos_file_private *file_priv = file->driver_priv;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001631
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +02001632 file_priv->ipp_dev = dev;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001633
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +02001634 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001635
1636 return 0;
1637}
1638
/*
 * ipp_subdrv_close - per-file close hook.
 *
 * Walks every driver's command list and tears down each command node
 * that was created through @file: stops a still-running operation,
 * releases the node and drops the runtime-PM reference once a driver's
 * list becomes empty.
 */
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->filp == file) {
				/*
				 * The process died or closed the file
				 * without issuing a stop control, so
				 * stop the running operation here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				/* last command gone: drop PM reference */
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return;
}
1678
Greg Kroah-Hartman56550d92012-12-21 15:09:25 -08001679static int ipp_probe(struct platform_device *pdev)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001680{
1681 struct device *dev = &pdev->dev;
1682 struct ipp_context *ctx;
1683 struct exynos_drm_subdrv *subdrv;
1684 int ret;
1685
Seung-Woo Kimd873ab92013-05-22 21:14:14 +09001686 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001687 if (!ctx)
1688 return -ENOMEM;
1689
Eunchul Kimcb471f142012-12-14 18:10:31 +09001690 mutex_init(&ctx->ipp_lock);
1691 mutex_init(&ctx->prop_lock);
1692
1693 idr_init(&ctx->ipp_idr);
1694 idr_init(&ctx->prop_idr);
1695
1696 /*
1697 * create single thread for ipp event
1698 * IPP supports event thread for IPP drivers.
1699 * IPP driver send event_work to this thread.
1700 * and IPP event thread send event to user process.
1701 */
1702 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1703 if (!ctx->event_workq) {
1704 dev_err(dev, "failed to create event workqueue\n");
Sachin Kamatbfb6ed22012-12-24 14:03:42 +05301705 return -EINVAL;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001706 }
1707
1708 /*
1709 * create single thread for ipp command
1710 * IPP supports command thread for user process.
1711 * user process make command node using set property ioctl.
1712 * and make start_work and send this work to command thread.
1713 * and then this command thread start property.
1714 */
1715 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1716 if (!ctx->cmd_workq) {
1717 dev_err(dev, "failed to create cmd workqueue\n");
1718 ret = -EINVAL;
1719 goto err_event_workq;
1720 }
1721
1722 /* set sub driver informations */
1723 subdrv = &ctx->subdrv;
1724 subdrv->dev = dev;
1725 subdrv->probe = ipp_subdrv_probe;
1726 subdrv->remove = ipp_subdrv_remove;
1727 subdrv->open = ipp_subdrv_open;
1728 subdrv->close = ipp_subdrv_close;
1729
1730 platform_set_drvdata(pdev, ctx);
1731
1732 ret = exynos_drm_subdrv_register(subdrv);
1733 if (ret < 0) {
1734 DRM_ERROR("failed to register drm ipp device.\n");
1735 goto err_cmd_workq;
1736 }
1737
Seung-Woo Kimd873ab92013-05-22 21:14:14 +09001738 dev_info(dev, "drm ipp registered successfully.\n");
Eunchul Kimcb471f142012-12-14 18:10:31 +09001739
1740 return 0;
1741
1742err_cmd_workq:
1743 destroy_workqueue(ctx->cmd_workq);
1744err_event_workq:
1745 destroy_workqueue(ctx->event_workq);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001746 return ret;
1747}
1748
Greg Kroah-Hartman56550d92012-12-21 15:09:25 -08001749static int ipp_remove(struct platform_device *pdev)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001750{
1751 struct ipp_context *ctx = platform_get_drvdata(pdev);
1752
Eunchul Kimcb471f142012-12-14 18:10:31 +09001753 /* unregister sub driver */
1754 exynos_drm_subdrv_unregister(&ctx->subdrv);
1755
1756 /* remove,destroy ipp idr */
Eunchul Kimcb471f142012-12-14 18:10:31 +09001757 idr_destroy(&ctx->ipp_idr);
1758 idr_destroy(&ctx->prop_idr);
1759
1760 mutex_destroy(&ctx->ipp_lock);
1761 mutex_destroy(&ctx->prop_lock);
1762
1763 /* destroy command, event work queue */
1764 destroy_workqueue(ctx->cmd_workq);
1765 destroy_workqueue(ctx->event_workq);
1766
Eunchul Kimcb471f142012-12-14 18:10:31 +09001767 return 0;
1768}
1769
/* platform driver for the IPP core device ("exynos-drm-ipp") */
struct platform_driver ipp_driver = {
	.probe = ipp_probe,
	.remove = ipp_remove,
	.driver = {
		.name = "exynos-drm-ipp",
		.owner = THIS_MODULE,
	},
};
1778