blob: 0552f62295a2ebeefd8c11307e386eae8589a5de [file] [log] [blame]
Eunchul Kimcb471f142012-12-14 18:10:31 +09001/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
Eunchul Kimcb471f142012-12-14 18:10:31 +090015#include <linux/platform_device.h>
16#include <linux/types.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
Eunchul Kimcb471f142012-12-14 18:10:31 +090019
20#include <drm/drmP.h>
21#include <drm/exynos_drm.h>
22#include "exynos_drm_drv.h"
23#include "exynos_drm_gem.h"
24#include "exynos_drm_ipp.h"
Eunchul Kimc12e2612012-12-14 17:58:54 +090025#include "exynos_drm_iommu.h"
Eunchul Kimcb471f142012-12-14 18:10:31 +090026
27/*
Eunchul Kim6fe891f2012-12-22 17:49:26 +090028 * IPP stands for Image Post Processing and
Eunchul Kimcb471f142012-12-14 18:10:31 +090029 * supports image scaler/rotator and input/output DMA operations.
30 * using FIMC, GSC, Rotator, so on.
31 * IPP is integration device driver of same attribute h/w
32 */
33
34/*
35 * TODO
36 * 1. expand command control id.
37 * 2. integrate property and config.
38 * 3. removed send_event id check routine.
39 * 4. compare send_event id if needed.
40 * 5. free subdrv_remove notifier callback list if needed.
41 * 6. need to check subdrv_open about multi-open.
42 * 7. need to power_on implement power and sysmmu ctrl.
43 */
44
45#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
46#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
47
Seung-Woo Kim43f41902013-04-23 14:02:53 +090048/* platform device pointer for ipp device. */
49static struct platform_device *exynos_drm_ipp_pdev;
50
/*
 * A structure of event.
 *
 * @base: base of event; linked on a cmd node's event_list and handed
 *	to the drm core for delivery to userspace.
 * @event: ipp event payload (prop_id, buf_id, user_data).
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};
61
/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations (source or destination side).
 * @prop_id: id of property this buffer belongs to.
 * @buf_id: id of buffer.
 * @buf_info: gem handles and dma address, size per plane.
 * @filp: a pointer to drm_file that owns the gem references.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head list;
	enum drm_exynos_ops_id ops_id;
	u32 prop_id;
	u32 buf_id;
	struct drm_exynos_ipp_buf_info buf_info;
	struct drm_file *filp;
};
80
/*
 * A structure of ipp context (per-device driver data).
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr (ipp_id -> struct exynos_drm_ippdrv).
 * @prop_idr: property idr (prop_id -> struct drm_exynos_ipp_cmd_node).
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv subdrv;
	struct mutex ipp_lock;
	struct mutex prop_lock;
	struct idr ipp_idr;
	struct idr prop_idr;
	struct workqueue_struct *event_workq;
	struct workqueue_struct *cmd_workq;
};
101
/* all registered ipp drivers, guarded by exynos_drm_ippdrv_lock */
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
/* notifier chain for ipp subsystem events */
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
105
Seung-Woo Kim43f41902013-04-23 14:02:53 +0900106int exynos_platform_device_ipp_register(void)
107{
108 struct platform_device *pdev;
109
110 if (exynos_drm_ipp_pdev)
111 return -EEXIST;
112
113 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
114 if (IS_ERR(pdev))
115 return PTR_ERR(pdev);
116
117 exynos_drm_ipp_pdev = pdev;
118
119 return 0;
120}
121
122void exynos_platform_device_ipp_unregister(void)
123{
124 if (exynos_drm_ipp_pdev) {
125 platform_device_unregister(exynos_drm_ipp_pdev);
126 exynos_drm_ipp_pdev = NULL;
127 }
128}
129
/* Add an ipp driver to the global driver list; always succeeds. */
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	/* list mutations are serialized by exynos_drm_ippdrv_lock */
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
138
/* Remove an ipp driver from the global driver list; always succeeds. */
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	/* list mutations are serialized by exynos_drm_ippdrv_lock */
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
147
148static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
149 u32 *idp)
150{
151 int ret;
152
Eunchul Kimcb471f142012-12-14 18:10:31 +0900153 /* do the allocation under our mutexlock */
154 mutex_lock(lock);
Tejun Heo8550cb22013-02-27 17:04:09 -0800155 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900156 mutex_unlock(lock);
Tejun Heo8550cb22013-02-27 17:04:09 -0800157 if (ret < 0)
158 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900159
Tejun Heo8550cb22013-02-27 17:04:09 -0800160 *idp = ret;
161 return 0;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900162}
163
/* Release an id previously handed out by ipp_create_id(). */
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	/* removal is serialized with allocation/lookup on the same idr */
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}
170
Eunchul Kimcb471f142012-12-14 18:10:31 +0900171static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
172{
173 void *obj;
174
Eunchul Kimcb471f142012-12-14 18:10:31 +0900175 mutex_lock(lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900176 obj = idr_find(id_idr, id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900177 mutex_unlock(lock);
178
179 return obj;
180}
181
182static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
183 enum drm_exynos_ipp_cmd cmd)
184{
185 /*
186 * check dedicated flag and WB, OUTPUT operation with
187 * power on state.
188 */
189 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
190 !pm_runtime_suspended(ippdrv->dev)))
191 return true;
192
193 return false;
194}
195
/*
 * Pick the ippdrv that will execute @property.
 *
 * A non-zero property->ipp_id selects that exact driver from the idr;
 * it must be available and must accept the property.  With ipp_id == 0
 * every registered driver is scanned and the first available, capable
 * one wins.  Returns ERR_PTR(-ENODEV/-EBUSY/-EINVAL) on failure.
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
				ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		/*
		 * WB, OUTPUT operation not supported multi-operation.
		 * so, make dedicated state at set property ioctl.
		 * when ipp driver finished operations, clear dedicated flags.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find correct device in ipp drivers.
		 * ipp drivers have different abilities,
		 * so need to check property.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * This case is search all ipp driver for finding.
		 * user application don't set ipp_id in this case,
		 * so ipp subsystem search correct driver in driver list.
		 *
		 * NOTE(review): the walk does not take
		 * exynos_drm_ippdrv_lock -- presumably drivers are only
		 * registered before ioctls can arrive; confirm.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not support property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
261
/*
 * Find the ippdrv that owns the command node identified by @prop_id.
 *
 * Used on paths that only carry a property handle (PAUSE state, queue
 * buf, command control).  Returns ERR_PTR(-ENODEV) when no registered
 * driver has a matching command node.
 */
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * This case is search ipp driver by prop_id handle.
	 * sometimes, ipp subsystem find driver by prop_id.
	 * e.g PAUSE state, queue buf, command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		/* each driver's cmd_list is guarded by its own cmd_lock */
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}
290
/*
 * DRM_EXYNOS_IPP_GET_PROPERTY ioctl handler.
 *
 * With prop_list->ipp_id == 0 it reports the number of registered ipp
 * drivers; with a non-zero ipp_id it copies that driver's capability
 * list (prop_list) back to userspace.
 */
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		/* NOTE(review): list walked without exynos_drm_ippdrv_lock */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Supports ippdrv list count for user application.
		 * First step user application getting ippdrv count.
		 * and second step getting ippdrv capability using ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Getting ippdrv capability by ipp_id.
		 * some device not supported wb, output interface.
		 * so, user application detect correct ipp driver
		 * using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
343
344static void ipp_print_property(struct drm_exynos_ipp_property *property,
345 int idx)
346{
347 struct drm_exynos_ipp_config *config = &property->config[idx];
348 struct drm_exynos_pos *pos = &config->pos;
349 struct drm_exynos_sz *sz = &config->sz;
350
YoungJun Chocbc4c332013-06-12 10:44:40 +0900351 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
352 property->prop_id, idx ? "dst" : "src", config->fmt);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900353
YoungJun Chocbc4c332013-06-12 10:44:40 +0900354 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
355 pos->x, pos->y, pos->w, pos->h,
Eunchul Kimcb471f142012-12-14 18:10:31 +0900356 sz->hsize, sz->vsize, config->flip, config->degree);
357}
358
359static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
360{
361 struct exynos_drm_ippdrv *ippdrv;
362 struct drm_exynos_ipp_cmd_node *c_node;
363 u32 prop_id = property->prop_id;
364
YoungJun Chocbc4c332013-06-12 10:44:40 +0900365 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900366
367 ippdrv = ipp_find_drv_by_handle(prop_id);
Sachin Kamatf0250452013-04-29 12:27:06 +0530368 if (IS_ERR(ippdrv)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900369 DRM_ERROR("failed to get ipp driver.\n");
370 return -EINVAL;
371 }
372
373 /*
374 * Find command node using command list in ippdrv.
375 * when we find this command no using prop_id.
376 * return property information set in this command node.
377 */
YoungJun Cho7f5af052014-05-26 10:17:18 +0200378 mutex_lock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900379 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
380 if ((c_node->property.prop_id == prop_id) &&
381 (c_node->state == IPP_STATE_STOP)) {
YoungJun Cho7f5af052014-05-26 10:17:18 +0200382 mutex_unlock(&ippdrv->cmd_lock);
YoungJun Chocbc4c332013-06-12 10:44:40 +0900383 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
384 property->cmd, (int)ippdrv);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900385
386 c_node->property = *property;
387 return 0;
388 }
389 }
YoungJun Cho7f5af052014-05-26 10:17:18 +0200390 mutex_unlock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900391
392 DRM_ERROR("failed to search property.\n");
393
394 return -EINVAL;
395}
396
397static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
398{
399 struct drm_exynos_ipp_cmd_work *cmd_work;
400
Eunchul Kimcb471f142012-12-14 18:10:31 +0900401 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900402 if (!cmd_work)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900403 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900404
405 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
406
407 return cmd_work;
408}
409
410static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
411{
412 struct drm_exynos_ipp_event_work *event_work;
413
Eunchul Kimcb471f142012-12-14 18:10:31 +0900414 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900415 if (!event_work)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900416 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900417
Andrzej Hajda60b61c22014-07-03 15:10:26 +0200418 INIT_WORK(&event_work->work, ipp_sched_event);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900419
420 return event_work;
421}
422
423int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
424 struct drm_file *file)
425{
426 struct drm_exynos_file_private *file_priv = file->driver_priv;
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +0200427 struct device *dev = file_priv->ipp_dev;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900428 struct ipp_context *ctx = get_ipp_context(dev);
429 struct drm_exynos_ipp_property *property = data;
430 struct exynos_drm_ippdrv *ippdrv;
431 struct drm_exynos_ipp_cmd_node *c_node;
432 int ret, i;
433
Eunchul Kimcb471f142012-12-14 18:10:31 +0900434 if (!ctx) {
435 DRM_ERROR("invalid context.\n");
436 return -EINVAL;
437 }
438
439 if (!property) {
440 DRM_ERROR("invalid property parameter.\n");
441 return -EINVAL;
442 }
443
444 /*
445 * This is log print for user application property.
446 * user application set various property.
447 */
448 for_each_ipp_ops(i)
449 ipp_print_property(property, i);
450
451 /*
452 * set property ioctl generated new prop_id.
453 * but in this case already asigned prop_id using old set property.
454 * e.g PAUSE state. this case supports find current prop_id and use it
455 * instead of allocation.
456 */
457 if (property->prop_id) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900458 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900459 return ipp_find_and_set_property(property);
460 }
461
462 /* find ipp driver using ipp id */
463 ippdrv = ipp_find_driver(ctx, property);
Sachin Kamatf0250452013-04-29 12:27:06 +0530464 if (IS_ERR(ippdrv)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900465 DRM_ERROR("failed to get ipp driver.\n");
466 return -EINVAL;
467 }
468
469 /* allocate command node */
470 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900471 if (!c_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900472 return -ENOMEM;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900473
474 /* create property id */
475 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
476 &property->prop_id);
477 if (ret) {
478 DRM_ERROR("failed to create id.\n");
479 goto err_clear;
480 }
481
YoungJun Chocbc4c332013-06-12 10:44:40 +0900482 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
483 property->prop_id, property->cmd, (int)ippdrv);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900484
485 /* stored property information and ippdrv in private data */
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +0200486 c_node->dev = dev;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900487 c_node->property = *property;
488 c_node->state = IPP_STATE_IDLE;
489
490 c_node->start_work = ipp_create_cmd_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530491 if (IS_ERR(c_node->start_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900492 DRM_ERROR("failed to create start work.\n");
YoungJun Cho075436b2014-05-26 10:17:19 +0200493 goto err_remove_id;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900494 }
495
496 c_node->stop_work = ipp_create_cmd_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530497 if (IS_ERR(c_node->stop_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900498 DRM_ERROR("failed to create stop work.\n");
499 goto err_free_start;
500 }
501
502 c_node->event_work = ipp_create_event_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530503 if (IS_ERR(c_node->event_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900504 DRM_ERROR("failed to create event work.\n");
505 goto err_free_stop;
506 }
507
YoungJun Cho4e4fe552014-05-26 10:17:17 +0200508 mutex_init(&c_node->lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900509 mutex_init(&c_node->mem_lock);
510 mutex_init(&c_node->event_lock);
511
512 init_completion(&c_node->start_complete);
513 init_completion(&c_node->stop_complete);
514
515 for_each_ipp_ops(i)
516 INIT_LIST_HEAD(&c_node->mem_list[i]);
517
518 INIT_LIST_HEAD(&c_node->event_list);
YoungJun Cho7f5af052014-05-26 10:17:18 +0200519 mutex_lock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900520 list_add_tail(&c_node->list, &ippdrv->cmd_list);
YoungJun Cho7f5af052014-05-26 10:17:18 +0200521 mutex_unlock(&ippdrv->cmd_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900522
523 /* make dedicated state without m2m */
524 if (!ipp_is_m2m_cmd(property->cmd))
525 ippdrv->dedicated = true;
526
527 return 0;
528
529err_free_stop:
530 kfree(c_node->stop_work);
531err_free_start:
532 kfree(c_node->start_work);
YoungJun Cho075436b2014-05-26 10:17:19 +0200533err_remove_id:
534 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900535err_clear:
536 kfree(c_node);
537 return ret;
538}
539
/*
 * Unlink a finished command node, release its prop_id and free every
 * resource allocated in exynos_drm_ipp_set_property().
 */
static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
560
Andrzej Hajdafb5ee012014-07-03 15:10:32 +0200561static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900562{
Andrzej Hajdafb5ee012014-07-03 15:10:32 +0200563 switch (c_node->property.cmd) {
564 case IPP_CMD_WB:
565 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
566 case IPP_CMD_OUTPUT:
567 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
568 case IPP_CMD_M2M:
569 default:
570 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
571 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900572 }
Eunchul Kimcb471f142012-12-14 18:10:31 +0900573}
574
575static struct drm_exynos_ipp_mem_node
576 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
577 struct drm_exynos_ipp_queue_buf *qbuf)
578{
579 struct drm_exynos_ipp_mem_node *m_node;
580 struct list_head *head;
581 int count = 0;
582
YoungJun Chocbc4c332013-06-12 10:44:40 +0900583 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900584
585 /* source/destination memory list */
586 head = &c_node->mem_list[qbuf->ops_id];
587
588 /* find memory node from memory list */
589 list_for_each_entry(m_node, head, list) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900590 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900591
592 /* compare buffer id */
593 if (m_node->buf_id == qbuf->buf_id)
594 return m_node;
595 }
596
597 return NULL;
598}
599
600static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
601 struct drm_exynos_ipp_cmd_node *c_node,
602 struct drm_exynos_ipp_mem_node *m_node)
603{
604 struct exynos_drm_ipp_ops *ops = NULL;
605 int ret = 0;
606
YoungJun Chocbc4c332013-06-12 10:44:40 +0900607 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900608
609 if (!m_node) {
610 DRM_ERROR("invalid queue node.\n");
611 return -EFAULT;
612 }
613
YoungJun Chocbc4c332013-06-12 10:44:40 +0900614 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900615
616 /* get operations callback */
617 ops = ippdrv->ops[m_node->ops_id];
618 if (!ops) {
619 DRM_ERROR("not support ops.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +0200620 return -EFAULT;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900621 }
622
623 /* set address and enable irq */
624 if (ops->set_addr) {
625 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
626 m_node->buf_id, IPP_BUF_ENQUEUE);
627 if (ret) {
628 DRM_ERROR("failed to set addr.\n");
YoungJun Cho220db6f2014-05-26 10:17:20 +0200629 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900630 }
631 }
632
Eunchul Kimcb471f142012-12-14 18:10:31 +0900633 return ret;
634}
635
636static struct drm_exynos_ipp_mem_node
637 *ipp_get_mem_node(struct drm_device *drm_dev,
638 struct drm_file *file,
639 struct drm_exynos_ipp_cmd_node *c_node,
640 struct drm_exynos_ipp_queue_buf *qbuf)
641{
642 struct drm_exynos_ipp_mem_node *m_node;
Andrzej Hajda73b00232014-07-03 15:10:30 +0200643 struct drm_exynos_ipp_buf_info *buf_info;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900644 int i;
645
Eunchul Kimcb471f142012-12-14 18:10:31 +0900646 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900647 if (!m_node)
YoungJun Cho220db6f2014-05-26 10:17:20 +0200648 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900649
Andrzej Hajda73b00232014-07-03 15:10:30 +0200650 buf_info = &m_node->buf_info;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900651
652 /* operations, buffer id */
653 m_node->ops_id = qbuf->ops_id;
654 m_node->prop_id = qbuf->prop_id;
655 m_node->buf_id = qbuf->buf_id;
656
YoungJun Chocbc4c332013-06-12 10:44:40 +0900657 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
658 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900659
660 for_each_ipp_planar(i) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900661 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900662
663 /* get dma address by handle */
664 if (qbuf->handle[i]) {
Andrzej Hajdaa8ea17f2014-07-03 15:10:29 +0200665 dma_addr_t *addr;
666
Eunchul Kimcb471f142012-12-14 18:10:31 +0900667 addr = exynos_drm_gem_get_dma_addr(drm_dev,
668 qbuf->handle[i], file);
669 if (IS_ERR(addr)) {
670 DRM_ERROR("failed to get addr.\n");
671 goto err_clear;
672 }
673
Andrzej Hajda73b00232014-07-03 15:10:30 +0200674 buf_info->handles[i] = qbuf->handle[i];
675 buf_info->base[i] = *addr;
676 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
677 buf_info->base[i], buf_info->handles[i]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900678 }
679 }
680
681 m_node->filp = file;
YoungJun Cho220db6f2014-05-26 10:17:20 +0200682 mutex_lock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900683 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900684 mutex_unlock(&c_node->mem_lock);
YoungJun Cho220db6f2014-05-26 10:17:20 +0200685
Eunchul Kimcb471f142012-12-14 18:10:31 +0900686 return m_node;
687
688err_clear:
689 kfree(m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900690 return ERR_PTR(-EFAULT);
691}
692
693static int ipp_put_mem_node(struct drm_device *drm_dev,
694 struct drm_exynos_ipp_cmd_node *c_node,
695 struct drm_exynos_ipp_mem_node *m_node)
696{
697 int i;
698
YoungJun Chocbc4c332013-06-12 10:44:40 +0900699 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900700
701 if (!m_node) {
702 DRM_ERROR("invalid dequeue node.\n");
703 return -EFAULT;
704 }
705
YoungJun Chocbc4c332013-06-12 10:44:40 +0900706 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900707
708 /* put gem buffer */
709 for_each_ipp_planar(i) {
710 unsigned long handle = m_node->buf_info.handles[i];
711 if (handle)
712 exynos_drm_gem_put_dma_addr(drm_dev, handle,
713 m_node->filp);
714 }
715
716 /* delete list in queue */
717 list_del(&m_node->list);
718 kfree(m_node);
719
Eunchul Kimcb471f142012-12-14 18:10:31 +0900720 return 0;
721}
722
/* drm_pending_event destroy callback; just frees the allocation */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
727
/*
 * Allocate a completion event for the buffer described by @qbuf and
 * queue it on the command node's event list; the event is delivered to
 * userspace when the operation on that buffer finishes.
 */
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		/*
		 * NOTE(review): event_space is credited back here although
		 * no matching debit is visible in this file -- presumably
		 * the legacy drm event-space reservation protocol; confirm
		 * against drm core before changing.  (sizeof(e->event) on
		 * a NULL e is fine: sizeof does not evaluate its operand.)
		 */
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
761
762static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
763 struct drm_exynos_ipp_queue_buf *qbuf)
764{
765 struct drm_exynos_ipp_send_event *e, *te;
766 int count = 0;
767
YoungJun Cho4d520762014-05-26 10:17:21 +0200768 mutex_lock(&c_node->event_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900769 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900770 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900771
772 /*
Sachin Kamat4fe25b82014-01-16 10:00:23 +0530773 * qbuf == NULL condition means all event deletion.
Eunchul Kimcb471f142012-12-14 18:10:31 +0900774 * stop operations want to delete all event list.
775 * another case delete only same buf id.
776 */
777 if (!qbuf) {
778 /* delete list */
779 list_del(&e->base.link);
780 kfree(e);
781 }
782
783 /* compare buffer id */
784 if (qbuf && (qbuf->buf_id ==
785 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
786 /* delete list */
787 list_del(&e->base.link);
788 kfree(e);
YoungJun Cho4d520762014-05-26 10:17:21 +0200789 goto out_unlock;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900790 }
791 }
YoungJun Cho4d520762014-05-26 10:17:21 +0200792
793out_unlock:
794 mutex_unlock(&c_node->event_lock);
795 return;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900796}
797
Sachin Kamat0bc4a0a2013-01-14 12:29:10 +0530798static void ipp_handle_cmd_work(struct device *dev,
Eunchul Kimcb471f142012-12-14 18:10:31 +0900799 struct exynos_drm_ippdrv *ippdrv,
800 struct drm_exynos_ipp_cmd_work *cmd_work,
801 struct drm_exynos_ipp_cmd_node *c_node)
802{
803 struct ipp_context *ctx = get_ipp_context(dev);
804
805 cmd_work->ippdrv = ippdrv;
806 cmd_work->c_node = c_node;
807 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
808}
809
/*
 * Queue a buffer and, when possible, kick the hardware.
 *
 * For M2M commands a complete src+dst pair schedules a PLAY command
 * work item; for WB/OUTPUT the buffer address is programmed directly.
 * A buffer queued while the command is not in START state is simply
 * left on the list to be picked up when the command starts.
 */
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	/* mem_lock guards both the readiness check and programming */
	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If set destination buffer and enabled clock,
	 * then m2m operations need start operations at queue_buf
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}
867
868static void ipp_clean_queue_buf(struct drm_device *drm_dev,
869 struct drm_exynos_ipp_cmd_node *c_node,
870 struct drm_exynos_ipp_queue_buf *qbuf)
871{
872 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
873
YoungJun Choc66ce402014-05-26 10:17:15 +0200874 /* delete list */
YoungJun Cho220db6f2014-05-26 10:17:20 +0200875 mutex_lock(&c_node->mem_lock);
YoungJun Choc66ce402014-05-26 10:17:15 +0200876 list_for_each_entry_safe(m_node, tm_node,
877 &c_node->mem_list[qbuf->ops_id], list) {
878 if (m_node->buf_id == qbuf->buf_id &&
879 m_node->ops_id == qbuf->ops_id)
880 ipp_put_mem_node(drm_dev, c_node, m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900881 }
YoungJun Cho220db6f2014-05-26 10:17:20 +0200882 mutex_unlock(&c_node->mem_lock);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900883}
884
/*
 * exynos_drm_ipp_queue_buf - ioctl handler for enqueueing/dequeueing a
 * source or destination buffer on an existing command node.
 *
 * ENQUEUE creates a memory node; for a destination buffer it also allocates
 * the completion event and (for M2M) starts the transfer.  DEQUEUE drops
 * the event and all matching memory nodes.
 *
 * Returns 0 on success or a negative errno.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	/* ops_id indexes fixed-size per-node arrays; reject out of range */
	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * first step get event for destination buffer.
		 * and second step when M2M case run with destination buffer
		 * if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case run play control for streaming feature.
			 * other case set address and waiting.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		/* c_node->lock serializes dequeue against the cmd worker */
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	/* undo the enqueue: drop the mem node(s) created above */
	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
976
977static bool exynos_drm_ipp_check_valid(struct device *dev,
978 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
979{
Eunchul Kimcb471f142012-12-14 18:10:31 +0900980 if (ctrl != IPP_CTRL_PLAY) {
981 if (pm_runtime_suspended(dev)) {
982 DRM_ERROR("pm:runtime_suspended.\n");
983 goto err_status;
984 }
985 }
986
987 switch (ctrl) {
988 case IPP_CTRL_PLAY:
989 if (state != IPP_STATE_IDLE)
990 goto err_status;
991 break;
992 case IPP_CTRL_STOP:
993 if (state == IPP_STATE_STOP)
994 goto err_status;
995 break;
996 case IPP_CTRL_PAUSE:
997 if (state != IPP_STATE_START)
998 goto err_status;
999 break;
1000 case IPP_CTRL_RESUME:
1001 if (state != IPP_STATE_STOP)
1002 goto err_status;
1003 break;
1004 default:
1005 DRM_ERROR("invalid state.\n");
1006 goto err_status;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001007 }
1008
1009 return true;
1010
1011err_status:
1012 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1013 return false;
1014}
1015
/*
 * exynos_drm_ipp_cmd_ctrl - ioctl handler driving the command-node state
 * machine: PLAY/RESUME start the worker, STOP/PAUSE dispatch a stop work
 * item and wait (bounded) for its completion; STOP additionally tears the
 * node down and may drop the runtime-PM reference.
 *
 * Returns 0 on success or a negative errno.
 */
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	/* reject transitions that are illegal from the current state */
	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		/* power the block up before starting, if suspended */
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		/* bounded wait; on timeout we still force the node to STOP */
		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		/* last command gone: release the runtime-PM reference */
		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		/* pause reuses the stop work item with a shorter timeout */
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
1119
1120int exynos_drm_ippnb_register(struct notifier_block *nb)
1121{
1122 return blocking_notifier_chain_register(
1123 &exynos_drm_ippnb_list, nb);
1124}
1125
1126int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1127{
1128 return blocking_notifier_chain_unregister(
1129 &exynos_drm_ippnb_list, nb);
1130}
1131
1132int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1133{
1134 return blocking_notifier_call_chain(
1135 &exynos_drm_ippnb_list, val, v);
1136}
1137
1138static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1139 struct drm_exynos_ipp_property *property)
1140{
1141 struct exynos_drm_ipp_ops *ops = NULL;
1142 bool swap = false;
1143 int ret, i;
1144
1145 if (!property) {
1146 DRM_ERROR("invalid property parameter.\n");
1147 return -EINVAL;
1148 }
1149
YoungJun Chocbc4c332013-06-12 10:44:40 +09001150 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001151
1152 /* reset h/w block */
1153 if (ippdrv->reset &&
1154 ippdrv->reset(ippdrv->dev)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +09001155 return -EINVAL;
1156 }
1157
1158 /* set source,destination operations */
1159 for_each_ipp_ops(i) {
1160 struct drm_exynos_ipp_config *config =
1161 &property->config[i];
1162
1163 ops = ippdrv->ops[i];
1164 if (!ops || !config) {
1165 DRM_ERROR("not support ops and config.\n");
1166 return -EINVAL;
1167 }
1168
1169 /* set format */
1170 if (ops->set_fmt) {
1171 ret = ops->set_fmt(ippdrv->dev, config->fmt);
Andrzej Hajda57ace332014-07-03 15:10:35 +02001172 if (ret)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001173 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001174 }
1175
1176 /* set transform for rotation, flip */
1177 if (ops->set_transf) {
1178 ret = ops->set_transf(ippdrv->dev, config->degree,
1179 config->flip, &swap);
Andrzej Hajda57ace332014-07-03 15:10:35 +02001180 if (ret)
1181 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001182 }
1183
1184 /* set size */
1185 if (ops->set_size) {
1186 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1187 &config->sz);
Andrzej Hajda57ace332014-07-03 15:10:35 +02001188 if (ret)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001189 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001190 }
1191 }
1192
1193 return 0;
1194}
1195
/*
 * ipp_start_property - bind @c_node to the driver, program its property
 * and queued memory nodes into the hardware, then call the driver's start
 * callback.  On any failure the driver binding (ippdrv->c_node) is undone.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M: program only the first queued node on each side */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* writeback: program every queued destination node */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* output: program every queued source node */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
1293
/*
 * ipp_stop_property - release every queued event and memory node of
 * @c_node and invoke the driver's stop callback.
 *
 * Note: the err_clear label is shared by the success path (fall-through)
 * and the error paths, so ->stop() runs in both cases.
 *
 * Returns 0 on success or the first put/validation error encountered.
 */
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	mutex_lock(&c_node->mem_lock);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M: drain both the source and destination lists */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	mutex_unlock(&c_node->mem_lock);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
1367
/*
 * ipp_sched_cmd - command workqueue handler.  Dispatches PLAY/RESUME to
 * ipp_start_property() and STOP/PAUSE to ipp_stop_property(), holding the
 * node lock for the whole operation.
 *
 * NOTE(review): the cast from work_struct assumes the work item is the
 * first member of drm_exynos_ipp_cmd_work — container_of() would state
 * that dependency explicitly; confirm against the struct definition.
 */
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		/* wake up the ioctl waiting in exynos_drm_ipp_cmd_ctrl() */
		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
1439
/*
 * ipp_send_event - on transfer completion, retire the finished memory
 * node(s), fill in the oldest pending DRM event with timestamp, prop_id
 * and buffer ids, and deliver it to the owning file's event queue.
 *
 * Lock order: event_lock, then mem_lock, then drm_dev->event_lock
 * (spinlock) for the final delivery.
 *
 * Returns 0 on success (including "nothing to report"), negative errno
 * otherwise.
 */
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): property points at an embedded member of c_node and
	 * can never be NULL here, so this check is dead code.
	 */
	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M: retire the oldest node on both sides */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	/* sanity: the retired buffer should match the IRQ-reported one */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * command node have event list of destination buffer
	 * If destination buffer enqueue to mem list,
	 * then we make event and link to event list tail.
	 * so, we get first event for first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	/* hand the event to the file's queue and wake any poller */
	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
1580
/*
 * ipp_sched_event - event workqueue handler: forwards a completed
 * transfer to userspace via ipp_send_event(), then (for M2M) completes
 * start_complete so the waiting command worker can proceed.
 */
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP supports command thread, event thread synchronization.
	 * If IPP close immediately from user land, then IPP make
	 * synchronization with command thread, so make complete event.
	 * or going out operations.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	/* unblock the M2M command worker even on the failure paths */
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}
1630
/*
 * ipp_subdrv_probe - sub-driver bind callback: for every registered IPP
 * driver, allocate an idr id, wire up the event workqueue/handler and
 * attach the iommu.  On failure, unwinds the drivers processed so far.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		u32 ipp_id;

		ippdrv->drm_dev = drm_dev;

		/* id 0 is reserved as "invalid" */
		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ipp_id);
		if (ret || ipp_id == 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ipp_id);

		ippdrv->prop_list.ipp_id = ipp_id;

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* roll back only the entries already initialized above */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
		drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}
1688
/*
 * ipp_subdrv_remove - sub-driver unbind callback: detach iommu, release
 * the idr id and unregister every IPP driver.
 *
 * NOTE(review): if exynos_drm_ippdrv_unregister() unlinks ippdrv from
 * exynos_drm_ippdrv_list, iterating with plain list_for_each_entry()
 * while unregistering is a use-after-free; list_for_each_entry_safe()
 * would be required — confirm against the unregister implementation.
 */
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}
1706
1707static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1708 struct drm_file *file)
1709{
1710 struct drm_exynos_file_private *file_priv = file->driver_priv;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001711
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +02001712 file_priv->ipp_dev = dev;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001713
Andrzej Hajda5c76c5b2014-07-03 15:10:28 +02001714 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001715
1716 return 0;
1717}
1718
/*
 * ipp_subdrv_close - per-file close hook.  If the process died or closed
 * the fd without issuing IPP_CTRL_STOP, stop and clean up every command
 * node this file still owns, dropping the runtime-PM reference when a
 * driver's command list becomes empty.
 */
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		/* _safe: ipp_clean_cmd_node() removes entries as we walk */
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			/* only touch nodes created through this file */
			if (c_node->dev == file_priv->ipp_dev) {
				/*
				 * userland goto unnormal state. process killed.
				 * and close the file.
				 * so, IPP didn't called stop cmd ctrl.
				 * so, we are make stop operation in this state.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return;
}
1761
Greg Kroah-Hartman56550d92012-12-21 15:09:25 -08001762static int ipp_probe(struct platform_device *pdev)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001763{
1764 struct device *dev = &pdev->dev;
1765 struct ipp_context *ctx;
1766 struct exynos_drm_subdrv *subdrv;
1767 int ret;
1768
Seung-Woo Kimd873ab92013-05-22 21:14:14 +09001769 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001770 if (!ctx)
1771 return -ENOMEM;
1772
Eunchul Kimcb471f142012-12-14 18:10:31 +09001773 mutex_init(&ctx->ipp_lock);
1774 mutex_init(&ctx->prop_lock);
1775
1776 idr_init(&ctx->ipp_idr);
1777 idr_init(&ctx->prop_idr);
1778
1779 /*
1780 * create single thread for ipp event
1781 * IPP supports event thread for IPP drivers.
1782 * IPP driver send event_work to this thread.
1783 * and IPP event thread send event to user process.
1784 */
1785 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1786 if (!ctx->event_workq) {
1787 dev_err(dev, "failed to create event workqueue\n");
Sachin Kamatbfb6ed22012-12-24 14:03:42 +05301788 return -EINVAL;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001789 }
1790
1791 /*
1792 * create single thread for ipp command
1793 * IPP supports command thread for user process.
1794 * user process make command node using set property ioctl.
1795 * and make start_work and send this work to command thread.
1796 * and then this command thread start property.
1797 */
1798 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1799 if (!ctx->cmd_workq) {
1800 dev_err(dev, "failed to create cmd workqueue\n");
1801 ret = -EINVAL;
1802 goto err_event_workq;
1803 }
1804
1805 /* set sub driver informations */
1806 subdrv = &ctx->subdrv;
1807 subdrv->dev = dev;
1808 subdrv->probe = ipp_subdrv_probe;
1809 subdrv->remove = ipp_subdrv_remove;
1810 subdrv->open = ipp_subdrv_open;
1811 subdrv->close = ipp_subdrv_close;
1812
1813 platform_set_drvdata(pdev, ctx);
1814
1815 ret = exynos_drm_subdrv_register(subdrv);
1816 if (ret < 0) {
1817 DRM_ERROR("failed to register drm ipp device.\n");
1818 goto err_cmd_workq;
1819 }
1820
Seung-Woo Kimd873ab92013-05-22 21:14:14 +09001821 dev_info(dev, "drm ipp registered successfully.\n");
Eunchul Kimcb471f142012-12-14 18:10:31 +09001822
1823 return 0;
1824
1825err_cmd_workq:
1826 destroy_workqueue(ctx->cmd_workq);
1827err_event_workq:
1828 destroy_workqueue(ctx->event_workq);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001829 return ret;
1830}
1831
/*
 * Remove the exynos-drm-ipp platform device.
 *
 * Mirrors ipp_probe() in reverse order: the sub-driver is unregistered
 * first so no new work can arrive, then the id allocators, locks and
 * worker threads created in probe are torn down.  The context itself
 * is devm-allocated and freed by the driver core.
 */
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}
1852
/*
 * Power-control stub shared by the system and runtime PM callbacks.
 *
 * Currently only logs the request and returns 0; the real power and
 * sysmmu control is still unimplemented (see TODO item 7 at the top
 * of this file).
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}
1859
1860#ifdef CONFIG_PM_SLEEP
1861static int ipp_suspend(struct device *dev)
1862{
1863 struct ipp_context *ctx = get_ipp_context(dev);
1864
Eunchul Kimcb471f142012-12-14 18:10:31 +09001865 if (pm_runtime_suspended(dev))
1866 return 0;
1867
1868 return ipp_power_ctrl(ctx, false);
1869}
1870
1871static int ipp_resume(struct device *dev)
1872{
1873 struct ipp_context *ctx = get_ipp_context(dev);
1874
Eunchul Kimcb471f142012-12-14 18:10:31 +09001875 if (!pm_runtime_suspended(dev))
1876 return ipp_power_ctrl(ctx, true);
1877
1878 return 0;
1879}
1880#endif
1881
1882#ifdef CONFIG_PM_RUNTIME
1883static int ipp_runtime_suspend(struct device *dev)
1884{
1885 struct ipp_context *ctx = get_ipp_context(dev);
1886
Eunchul Kimcb471f142012-12-14 18:10:31 +09001887 return ipp_power_ctrl(ctx, false);
1888}
1889
1890static int ipp_runtime_resume(struct device *dev)
1891{
1892 struct ipp_context *ctx = get_ipp_context(dev);
1893
Eunchul Kimcb471f142012-12-14 18:10:31 +09001894 return ipp_power_ctrl(ctx, true);
1895}
1896#endif
1897
/* Hook both system-sleep and runtime PM into the stub power control. */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
1902
/*
 * Platform driver for the "exynos-drm-ipp" virtual device; non-static
 * because it is registered from the exynos DRM core driver.
 */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};
1912