blob: 09687778940d44ea0c645d2678f8dc5d369fdb28 [file] [log] [blame]
Eunchul Kimcb471f142012-12-14 18:10:31 +09001/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
Eunchul Kimcb471f142012-12-14 18:10:31 +090015#include <linux/platform_device.h>
16#include <linux/types.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
Eunchul Kimcb471f142012-12-14 18:10:31 +090019
20#include <drm/drmP.h>
21#include <drm/exynos_drm.h>
22#include "exynos_drm_drv.h"
23#include "exynos_drm_gem.h"
24#include "exynos_drm_ipp.h"
Eunchul Kimc12e2612012-12-14 17:58:54 +090025#include "exynos_drm_iommu.h"
Eunchul Kimcb471f142012-12-14 18:10:31 +090026
27/*
Eunchul Kim6fe891f2012-12-22 17:49:26 +090028 * IPP stands for Image Post Processing and
Eunchul Kimcb471f142012-12-14 18:10:31 +090029 * supports image scaler/rotator and input/output DMA operations.
30 * using FIMC, GSC, Rotator, so on.
31 * IPP is integration device driver of same attribute h/w
32 */
33
34/*
35 * TODO
36 * 1. expand command control id.
37 * 2. integrate property and config.
38 * 3. removed send_event id check routine.
39 * 4. compare send_event id if needed.
40 * 5. free subdrv_remove notifier callback list if needed.
41 * 6. need to check subdrv_open about multi-open.
42 * 7. need to power_on implement power and sysmmu ctrl.
43 */
44
/* Fetch the ipp_context stored as drvdata of the ipp platform device. */
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
/* True only for memory-to-memory commands; arg parenthesized for macro hygiene. */
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)
47
Seung-Woo Kim43f41902013-04-23 14:02:53 +090048/* platform device pointer for ipp device. */
49static struct platform_device *exynos_drm_ipp_pdev;
50
Eunchul Kimcb471f142012-12-14 18:10:31 +090051/*
52 * A structure of event.
53 *
54 * @base: base of event.
55 * @event: ipp event.
56 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event base;		/* generic DRM pending-event bookkeeping */
	struct drm_exynos_ipp_event event;	/* ipp payload delivered to userspace */
};
61
62/*
63 * A structure of memory node.
64 *
65 * @list: list head to memory queue information.
66 * @ops_id: id of operations.
67 * @prop_id: id of property.
68 * @buf_id: id of buffer.
69 * @buf_info: gem objects and dma address, size.
70 * @filp: a pointer to drm_file.
71 */
struct drm_exynos_ipp_mem_node {
	struct list_head list;			/* link into c_node->mem_list[ops_id] */
	enum drm_exynos_ops_id ops_id;		/* source or destination operation */
	u32 prop_id;				/* owning property id */
	u32 buf_id;				/* buffer id within the queue */
	struct drm_exynos_ipp_buf_info buf_info;	/* gem handles and dma addresses */
	struct drm_file *filp;			/* file used to get/put the dma addresses */
};
80
81/*
82 * A structure of ipp context.
83 *
84 * @subdrv: prepare initialization using subdrv.
85 * @ipp_lock: lock for synchronization of access to ipp_idr.
86 * @prop_lock: lock for synchronization of access to prop_idr.
87 * @ipp_idr: ipp driver idr.
88 * @prop_idr: property idr.
89 * @event_workq: event work queue.
90 * @cmd_workq: command work queue.
91 */
struct ipp_context {
	struct exynos_drm_subdrv subdrv;	/* subdrv registration for this device */
	struct mutex ipp_lock;			/* protects ipp_idr */
	struct mutex prop_lock;			/* protects prop_idr */
	struct idr ipp_idr;			/* ipp driver id -> ippdrv */
	struct idr prop_idr;			/* property id -> command node */
	struct workqueue_struct *event_workq;	/* runs ipp_sched_event work */
	struct workqueue_struct *cmd_workq;	/* runs ipp_sched_cmd work */
};
101
102static LIST_HEAD(exynos_drm_ippdrv_list);
103static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
104static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
105
Seung-Woo Kim43f41902013-04-23 14:02:53 +0900106int exynos_platform_device_ipp_register(void)
107{
108 struct platform_device *pdev;
109
110 if (exynos_drm_ipp_pdev)
111 return -EEXIST;
112
113 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
114 if (IS_ERR(pdev))
115 return PTR_ERR(pdev);
116
117 exynos_drm_ipp_pdev = pdev;
118
119 return 0;
120}
121
122void exynos_platform_device_ipp_unregister(void)
123{
124 if (exynos_drm_ipp_pdev) {
125 platform_device_unregister(exynos_drm_ipp_pdev);
126 exynos_drm_ipp_pdev = NULL;
127 }
128}
129
/*
 * Add @ippdrv to the global ipp driver list under the list mutex.
 * Returns -EINVAL for a NULL driver.
 */
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
141
/*
 * Remove @ippdrv from the global ipp driver list under the list mutex.
 * Returns -EINVAL for a NULL driver.
 */
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
153
154static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
155 u32 *idp)
156{
157 int ret;
158
Eunchul Kimcb471f142012-12-14 18:10:31 +0900159 /* do the allocation under our mutexlock */
160 mutex_lock(lock);
Tejun Heo8550cb22013-02-27 17:04:09 -0800161 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900162 mutex_unlock(lock);
Tejun Heo8550cb22013-02-27 17:04:09 -0800163 if (ret < 0)
164 return ret;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900165
Tejun Heo8550cb22013-02-27 17:04:09 -0800166 *idp = ret;
167 return 0;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900168}
169
170static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
171{
172 void *obj;
173
YoungJun Chocbc4c332013-06-12 10:44:40 +0900174 DRM_DEBUG_KMS("id[%d]\n", id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900175
176 mutex_lock(lock);
177
178 /* find object using handle */
179 obj = idr_find(id_idr, id);
180 if (!obj) {
181 DRM_ERROR("failed to find object.\n");
182 mutex_unlock(lock);
183 return ERR_PTR(-ENODEV);
184 }
185
186 mutex_unlock(lock);
187
188 return obj;
189}
190
191static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
192 enum drm_exynos_ipp_cmd cmd)
193{
194 /*
195 * check dedicated flag and WB, OUTPUT operation with
196 * power on state.
197 */
198 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
199 !pm_runtime_suspended(ippdrv->dev)))
200 return true;
201
202 return false;
203}
204
/*
 * Select an ipp driver for @property.
 *
 * When property->ipp_id is set, look that driver up in the idr and
 * validate it; otherwise scan the global driver list for the first
 * free driver whose check_property callback accepts the request.
 * Returns the driver or an ERR_PTR (-EBUSY, -EINVAL, -ENODEV).
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB/OUTPUT operations do not support multi-operation:
		 * the driver becomes dedicated at set-property time and
		 * the flag is cleared when its operations finish.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * Drivers have different abilities, so the chosen one
		 * must confirm it can handle this property.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * No ipp_id from userspace: search the whole driver list
		 * for a driver that is free and supports the property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not support property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
270
/*
 * Find the ipp driver owning the command node with @prop_id by walking
 * every driver's cmd_list (used e.g. for PAUSE, queue buf, cmd control).
 * Returns the driver or ERR_PTR(-ENODEV).
 */
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		list_for_each_entry(c_node, &ippdrv->cmd_list, list)
			if (c_node->property.prop_id == prop_id)
				return ippdrv;
	}

	return ERR_PTR(-ENODEV);
}
294
/*
 * GET_PROPERTY ioctl.
 *
 * Two-step userspace protocol: with ipp_id == 0 report only the number
 * of registered drivers; with a concrete ipp_id copy that driver's
 * capability list into @data.
 */
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Step one of the protocol: userspace first queries the
		 * driver count, then asks per-ipp_id capabilities.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Step two: report the capabilities of one driver so the
		 * application can detect e.g. missing wb/output support.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
347
348static void ipp_print_property(struct drm_exynos_ipp_property *property,
349 int idx)
350{
351 struct drm_exynos_ipp_config *config = &property->config[idx];
352 struct drm_exynos_pos *pos = &config->pos;
353 struct drm_exynos_sz *sz = &config->sz;
354
YoungJun Chocbc4c332013-06-12 10:44:40 +0900355 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
356 property->prop_id, idx ? "dst" : "src", config->fmt);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900357
YoungJun Chocbc4c332013-06-12 10:44:40 +0900358 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
359 pos->x, pos->y, pos->w, pos->h,
Eunchul Kimcb471f142012-12-14 18:10:31 +0900360 sz->hsize, sz->vsize, config->flip, config->degree);
361}
362
/*
 * Update an already-created property in place (e.g. resuming from
 * PAUSE): locate the command node with property->prop_id and, if it is
 * stopped, overwrite its stored property. Returns 0 or -EINVAL.
 */
static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Walk the driver's command list; only a node in STOP state may
	 * take the new property.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
				property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}
397
398static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
399{
400 struct drm_exynos_ipp_cmd_work *cmd_work;
401
Eunchul Kimcb471f142012-12-14 18:10:31 +0900402 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900403 if (!cmd_work)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900404 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900405
406 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
407
408 return cmd_work;
409}
410
411static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
412{
413 struct drm_exynos_ipp_event_work *event_work;
414
Eunchul Kimcb471f142012-12-14 18:10:31 +0900415 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900416 if (!event_work)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900417 return ERR_PTR(-ENOMEM);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900418
419 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
420
421 return event_work;
422}
423
424int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
425 struct drm_file *file)
426{
427 struct drm_exynos_file_private *file_priv = file->driver_priv;
428 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
429 struct device *dev = priv->dev;
430 struct ipp_context *ctx = get_ipp_context(dev);
431 struct drm_exynos_ipp_property *property = data;
432 struct exynos_drm_ippdrv *ippdrv;
433 struct drm_exynos_ipp_cmd_node *c_node;
434 int ret, i;
435
Eunchul Kimcb471f142012-12-14 18:10:31 +0900436 if (!ctx) {
437 DRM_ERROR("invalid context.\n");
438 return -EINVAL;
439 }
440
441 if (!property) {
442 DRM_ERROR("invalid property parameter.\n");
443 return -EINVAL;
444 }
445
446 /*
447 * This is log print for user application property.
448 * user application set various property.
449 */
450 for_each_ipp_ops(i)
451 ipp_print_property(property, i);
452
453 /*
454 * set property ioctl generated new prop_id.
455 * but in this case already asigned prop_id using old set property.
456 * e.g PAUSE state. this case supports find current prop_id and use it
457 * instead of allocation.
458 */
459 if (property->prop_id) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900460 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900461 return ipp_find_and_set_property(property);
462 }
463
464 /* find ipp driver using ipp id */
465 ippdrv = ipp_find_driver(ctx, property);
Sachin Kamatf0250452013-04-29 12:27:06 +0530466 if (IS_ERR(ippdrv)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900467 DRM_ERROR("failed to get ipp driver.\n");
468 return -EINVAL;
469 }
470
471 /* allocate command node */
472 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900473 if (!c_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900474 return -ENOMEM;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900475
476 /* create property id */
477 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
478 &property->prop_id);
479 if (ret) {
480 DRM_ERROR("failed to create id.\n");
481 goto err_clear;
482 }
483
YoungJun Chocbc4c332013-06-12 10:44:40 +0900484 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
485 property->prop_id, property->cmd, (int)ippdrv);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900486
487 /* stored property information and ippdrv in private data */
488 c_node->priv = priv;
489 c_node->property = *property;
490 c_node->state = IPP_STATE_IDLE;
491
492 c_node->start_work = ipp_create_cmd_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530493 if (IS_ERR(c_node->start_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900494 DRM_ERROR("failed to create start work.\n");
495 goto err_clear;
496 }
497
498 c_node->stop_work = ipp_create_cmd_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530499 if (IS_ERR(c_node->stop_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900500 DRM_ERROR("failed to create stop work.\n");
501 goto err_free_start;
502 }
503
504 c_node->event_work = ipp_create_event_work();
Sachin Kamatf0250452013-04-29 12:27:06 +0530505 if (IS_ERR(c_node->event_work)) {
Eunchul Kimcb471f142012-12-14 18:10:31 +0900506 DRM_ERROR("failed to create event work.\n");
507 goto err_free_stop;
508 }
509
510 mutex_init(&c_node->cmd_lock);
511 mutex_init(&c_node->mem_lock);
512 mutex_init(&c_node->event_lock);
513
514 init_completion(&c_node->start_complete);
515 init_completion(&c_node->stop_complete);
516
517 for_each_ipp_ops(i)
518 INIT_LIST_HEAD(&c_node->mem_list[i]);
519
520 INIT_LIST_HEAD(&c_node->event_list);
521 list_splice_init(&priv->event_list, &c_node->event_list);
522 list_add_tail(&c_node->list, &ippdrv->cmd_list);
523
524 /* make dedicated state without m2m */
525 if (!ipp_is_m2m_cmd(property->cmd))
526 ippdrv->dedicated = true;
527
528 return 0;
529
530err_free_stop:
531 kfree(c_node->stop_work);
532err_free_start:
533 kfree(c_node->start_work);
534err_clear:
535 kfree(c_node);
536 return ret;
537}
538
/*
 * Unlink @c_node from its driver's cmd_list and free all resources it
 * owns (mutexes, the three work items, the node itself).
 */
static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
555
/*
 * Count the queued memory nodes of @c_node.
 *
 * M2M commands need paired src/dst buffers, so their usable depth is
 * the minimum of the two counts; other commands use only one side, so
 * the maximum applies. Returns that count (0 means nothing queued).
 */
static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		/* count entries on this side */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("min[%d]max[%d]\n",
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/* m2m needs src/dst pairs; other commands use a single side */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}
597
598static struct drm_exynos_ipp_mem_node
599 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
600 struct drm_exynos_ipp_queue_buf *qbuf)
601{
602 struct drm_exynos_ipp_mem_node *m_node;
603 struct list_head *head;
604 int count = 0;
605
YoungJun Chocbc4c332013-06-12 10:44:40 +0900606 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900607
608 /* source/destination memory list */
609 head = &c_node->mem_list[qbuf->ops_id];
610
611 /* find memory node from memory list */
612 list_for_each_entry(m_node, head, list) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900613 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900614
615 /* compare buffer id */
616 if (m_node->buf_id == qbuf->buf_id)
617 return m_node;
618 }
619
620 return NULL;
621}
622
/*
 * Program @m_node's buffer into the hardware via the driver's per-ops
 * set_addr callback (ENQUEUE). Returns 0 or a negative errno.
 */
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback for this node's side (src/dst) */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}
663
664static struct drm_exynos_ipp_mem_node
665 *ipp_get_mem_node(struct drm_device *drm_dev,
666 struct drm_file *file,
667 struct drm_exynos_ipp_cmd_node *c_node,
668 struct drm_exynos_ipp_queue_buf *qbuf)
669{
670 struct drm_exynos_ipp_mem_node *m_node;
671 struct drm_exynos_ipp_buf_info buf_info;
672 void *addr;
673 int i;
674
Eunchul Kimcb471f142012-12-14 18:10:31 +0900675 mutex_lock(&c_node->mem_lock);
676
677 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +0900678 if (!m_node)
Eunchul Kimcb471f142012-12-14 18:10:31 +0900679 goto err_unlock;
Eunchul Kimcb471f142012-12-14 18:10:31 +0900680
681 /* clear base address for error handling */
682 memset(&buf_info, 0x0, sizeof(buf_info));
683
684 /* operations, buffer id */
685 m_node->ops_id = qbuf->ops_id;
686 m_node->prop_id = qbuf->prop_id;
687 m_node->buf_id = qbuf->buf_id;
688
YoungJun Chocbc4c332013-06-12 10:44:40 +0900689 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
690 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900691
692 for_each_ipp_planar(i) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900693 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900694
695 /* get dma address by handle */
696 if (qbuf->handle[i]) {
697 addr = exynos_drm_gem_get_dma_addr(drm_dev,
698 qbuf->handle[i], file);
699 if (IS_ERR(addr)) {
700 DRM_ERROR("failed to get addr.\n");
701 goto err_clear;
702 }
703
704 buf_info.handles[i] = qbuf->handle[i];
705 buf_info.base[i] = *(dma_addr_t *) addr;
YoungJun Chocbc4c332013-06-12 10:44:40 +0900706 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
707 i, buf_info.base[i], (int)buf_info.handles[i]);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900708 }
709 }
710
711 m_node->filp = file;
712 m_node->buf_info = buf_info;
713 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
714
715 mutex_unlock(&c_node->mem_lock);
716 return m_node;
717
718err_clear:
719 kfree(m_node);
720err_unlock:
721 mutex_unlock(&c_node->mem_lock);
722 return ERR_PTR(-EFAULT);
723}
724
/*
 * Release @m_node: put every plane's gem dma address, unlink the node
 * from its queue and free it. Returns 0 or a negative errno.
 */
static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	/* NOTE(review): list_empty() on an entry's own link detects an
	 * unlinked/reinitialized node, not an empty queue — confirm the
	 * callers' intent. */
	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
				m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}
763
/* drm_pending_event destroy callback: free the containing send_event. */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
768
/*
 * Allocate and queue a completion event for a destination buffer on
 * c_node->event_list. Returns 0 or -ENOMEM.
 */
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		/* refund event_space, presumably reserved by an earlier
		 * step in the ioctl path — TODO confirm against caller */
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}
800
801static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
802 struct drm_exynos_ipp_queue_buf *qbuf)
803{
804 struct drm_exynos_ipp_send_event *e, *te;
805 int count = 0;
806
Eunchul Kimcb471f142012-12-14 18:10:31 +0900807 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
YoungJun Chocbc4c332013-06-12 10:44:40 +0900808 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900809
810 /*
Sachin Kamat4fe25b82014-01-16 10:00:23 +0530811 * qbuf == NULL condition means all event deletion.
Eunchul Kimcb471f142012-12-14 18:10:31 +0900812 * stop operations want to delete all event list.
813 * another case delete only same buf id.
814 */
815 if (!qbuf) {
816 /* delete list */
817 list_del(&e->base.link);
818 kfree(e);
819 }
820
821 /* compare buffer id */
822 if (qbuf && (qbuf->buf_id ==
823 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
824 /* delete list */
825 list_del(&e->base.link);
826 kfree(e);
827 return;
828 }
829 }
830}
831
/*
 * Fill in @cmd_work's target driver/command node and queue it on the
 * context's command workqueue (executed by ipp_sched_cmd).
 */
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
843
/*
 * Handle an enqueued buffer while a command may be running: for m2m
 * commands schedule a PLAY via the command workqueue; otherwise program
 * the buffer address immediately. A node whose command is not in START
 * state, or with no queued memory, is silently accepted (returns 0).
 */
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	/* only a started command consumes buffers here */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * M2M operations start at queue-buf time (streaming): schedule a
	 * PLAY on the workqueue. Other operations just set the address
	 * and wait for hardware events.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}
897
898static void ipp_clean_queue_buf(struct drm_device *drm_dev,
899 struct drm_exynos_ipp_cmd_node *c_node,
900 struct drm_exynos_ipp_queue_buf *qbuf)
901{
902 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
903
YoungJun Choc66ce402014-05-26 10:17:15 +0200904 /* delete list */
905 list_for_each_entry_safe(m_node, tm_node,
906 &c_node->mem_list[qbuf->ops_id], list) {
907 if (m_node->buf_id == qbuf->buf_id &&
908 m_node->ops_id == qbuf->ops_id)
909 ipp_put_mem_node(drm_dev, c_node, m_node);
Eunchul Kimcb471f142012-12-14 18:10:31 +0900910 }
911}
912
/*
 * QUEUE_BUF ioctl.
 *
 * ENQUEUE: build a memory node for the buffer; for destination buffers
 * also allocate a completion event and, if the command is running,
 * kick it (m2m PLAY or immediate address programming).
 * DEQUEUE: drop the pending event and release matching memory nodes.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * Destination buffers additionally get a completion event
		 * and, for m2m, may start the command here.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case runs play control for streaming;
			 * other cases set the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
1005
1006static bool exynos_drm_ipp_check_valid(struct device *dev,
1007 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1008{
Eunchul Kimcb471f142012-12-14 18:10:31 +09001009 if (ctrl != IPP_CTRL_PLAY) {
1010 if (pm_runtime_suspended(dev)) {
1011 DRM_ERROR("pm:runtime_suspended.\n");
1012 goto err_status;
1013 }
1014 }
1015
1016 switch (ctrl) {
1017 case IPP_CTRL_PLAY:
1018 if (state != IPP_STATE_IDLE)
1019 goto err_status;
1020 break;
1021 case IPP_CTRL_STOP:
1022 if (state == IPP_STATE_STOP)
1023 goto err_status;
1024 break;
1025 case IPP_CTRL_PAUSE:
1026 if (state != IPP_STATE_START)
1027 goto err_status;
1028 break;
1029 case IPP_CTRL_RESUME:
1030 if (state != IPP_STATE_STOP)
1031 goto err_status;
1032 break;
1033 default:
1034 DRM_ERROR("invalid state.\n");
1035 goto err_status;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001036 }
1037
1038 return true;
1039
1040err_status:
1041 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1042 return false;
1043}
1044
/*
 * exynos_drm_ipp_cmd_ctrl - IPP_CMD_CTRL ioctl handler.
 *
 * Drives the command-node state machine: PLAY/RESUME queue a start work
 * item to the command thread, STOP/PAUSE queue a stop work item and wait
 * (bounded) for the command thread to signal stop_complete.  STOP
 * additionally tears the command node down and drops the runtime-PM
 * reference when the driver has no commands left.
 *
 * Returns 0 on success or a negative errno.
 */
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	/* resolve the hardware driver currently bound to this property */
	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	/* reject transitions that are illegal for the current state */
	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		/* first PLAY powers the block up via runtime PM */
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		/* bounded wait; on timeout we force the state anyway */
		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		/* last command gone: release the PLAY power reference */
		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		/* like STOP but keeps the command node and power ref */
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
1147
1148int exynos_drm_ippnb_register(struct notifier_block *nb)
1149{
1150 return blocking_notifier_chain_register(
1151 &exynos_drm_ippnb_list, nb);
1152}
1153
1154int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1155{
1156 return blocking_notifier_chain_unregister(
1157 &exynos_drm_ippnb_list, nb);
1158}
1159
1160int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1161{
1162 return blocking_notifier_call_chain(
1163 &exynos_drm_ippnb_list, val, v);
1164}
1165
1166static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1167 struct drm_exynos_ipp_property *property)
1168{
1169 struct exynos_drm_ipp_ops *ops = NULL;
1170 bool swap = false;
1171 int ret, i;
1172
1173 if (!property) {
1174 DRM_ERROR("invalid property parameter.\n");
1175 return -EINVAL;
1176 }
1177
YoungJun Chocbc4c332013-06-12 10:44:40 +09001178 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001179
1180 /* reset h/w block */
1181 if (ippdrv->reset &&
1182 ippdrv->reset(ippdrv->dev)) {
1183 DRM_ERROR("failed to reset.\n");
1184 return -EINVAL;
1185 }
1186
1187 /* set source,destination operations */
1188 for_each_ipp_ops(i) {
1189 struct drm_exynos_ipp_config *config =
1190 &property->config[i];
1191
1192 ops = ippdrv->ops[i];
1193 if (!ops || !config) {
1194 DRM_ERROR("not support ops and config.\n");
1195 return -EINVAL;
1196 }
1197
1198 /* set format */
1199 if (ops->set_fmt) {
1200 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1201 if (ret) {
1202 DRM_ERROR("not support format.\n");
1203 return ret;
1204 }
1205 }
1206
1207 /* set transform for rotation, flip */
1208 if (ops->set_transf) {
1209 ret = ops->set_transf(ippdrv->dev, config->degree,
1210 config->flip, &swap);
1211 if (ret) {
1212 DRM_ERROR("not support tranf.\n");
1213 return -EINVAL;
1214 }
1215 }
1216
1217 /* set size */
1218 if (ops->set_size) {
1219 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1220 &config->sz);
1221 if (ret) {
1222 DRM_ERROR("not support size.\n");
1223 return ret;
1224 }
1225 }
1226 }
1227
1228 return 0;
1229}
1230
/*
 * ipp_start_property - bind a command node to the driver and start it.
 *
 * Records @c_node as the driver's active command, programs the property
 * into the hardware, queues the buffer addresses appropriate for the
 * command type (M2M: first src+dst pair; WB: all dst; OUTPUT: all src)
 * and finally calls the driver's start hook.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/*
	 * check command
	 * NOTE(review): the later error returns leave ippdrv->c_node set,
	 * unlike the ipp_set_property failure above — confirm callers
	 * clear it on failure.
	 */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/*
			 * NOTE(review): list_first_entry() never returns
			 * NULL, so this check cannot trigger; emptiness is
			 * already guarded by ipp_check_mem_list() above.
			 */
			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}
1323
/*
 * ipp_stop_property - stop a running command and release its buffers.
 *
 * Flushes pending events, returns every queued memory node for the
 * command type back to GEM, then calls the driver's stop hook.  The
 * success path deliberately falls through into err_clear so that the
 * hardware is stopped in both the success and failure cases.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/* _safe iterator: ipp_put_mem_node unlinks entries */
			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations — runs on success (fallthrough) and on error */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
1393
/*
 * ipp_sched_cmd - command-thread worker.
 *
 * Executes a queued start/stop request under the command node's
 * cmd_lock.  PLAY/RESUME start the property and, for M2M commands,
 * block (bounded) on start_complete, which the event thread signals
 * once the transfer finishes.  STOP/PAUSE stop the property and signal
 * stop_complete back to the waiting ioctl in exynos_drm_ipp_cmd_ctrl().
 */
void ipp_sched_cmd(struct work_struct *work)
{
	/*
	 * NOTE(review): this cast assumes work is the first member of
	 * struct drm_exynos_ipp_cmd_work — container_of() would make
	 * that explicit.
	 */
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		/* wake the ioctl caller waiting in exynos_drm_ipp_cmd_ctrl */
		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}
1465
/*
 * ipp_send_event - deliver a buffer-done event to userspace.
 *
 * Called from the event thread after the hardware signals completion.
 * Retires the finished memory node(s) for the command type, fills the
 * oldest queued DRM event with a timestamp, the property id and the
 * completed buffer ids, and moves it onto the file's event queue so a
 * blocked reader is woken.
 *
 * @buf_id: per-direction buffer ids reported by the driver interrupt.
 *
 * Returns 0 on success (including the benign "nothing queued" cases)
 * or a negative errno.
 */
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): property is the address of an embedded struct and
	 * can never be NULL; this check is dead code.
	 */
	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	/* no consumer queued an event: nothing to deliver, not an error */
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* retire the oldest src and dst nodes (single-shot unit) */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	/* sanity: driver-reported id should match the node we retired */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * command node have event list of destination buffer
	 * If destination buffer enqueue to mem list,
	 * then we make event and link to event list tail.
	 * so, we get first event for first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	/* hand the event to the file under the DRM event lock */
	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}
1605
/*
 * ipp_sched_event - event-thread worker.
 *
 * Runs after a completion interrupt: sends the DRM event for the
 * finished buffer and, for M2M commands, signals start_complete so the
 * command thread blocked in ipp_sched_cmd() can proceed.  If the
 * command is no longer in START state (e.g. the file was closed), the
 * event is skipped but the completion is still signalled.
 */
void ipp_sched_event(struct work_struct *work)
{
	/*
	 * NOTE(review): assumes work is the first member of
	 * struct drm_exynos_ipp_event_work (same cast pattern as
	 * ipp_sched_cmd).
	 */
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP supports command thread, event thread synchronization.
	 * If IPP close immediately from user land, then IPP make
	 * synchronization with command thread, so make complete event.
	 * or going out operations.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	mutex_lock(&c_node->event_lock);

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	/* always unblock the M2M command thread, even on bypass/error */
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}
1659
1660static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1661{
1662 struct ipp_context *ctx = get_ipp_context(dev);
1663 struct exynos_drm_ippdrv *ippdrv;
1664 int ret, count = 0;
1665
Eunchul Kimcb471f142012-12-14 18:10:31 +09001666 /* get ipp driver entry */
1667 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
Andrzej Hajdaf51bcee2014-05-19 12:54:04 +02001668 u32 ipp_id;
1669
Eunchul Kimcb471f142012-12-14 18:10:31 +09001670 ippdrv->drm_dev = drm_dev;
1671
1672 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
Andrzej Hajdaf51bcee2014-05-19 12:54:04 +02001673 &ipp_id);
1674 if (ret || ipp_id == 0) {
Eunchul Kimcb471f142012-12-14 18:10:31 +09001675 DRM_ERROR("failed to create id.\n");
1676 goto err_idr;
1677 }
1678
YoungJun Chocbc4c332013-06-12 10:44:40 +09001679 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
Andrzej Hajdaf51bcee2014-05-19 12:54:04 +02001680 count++, (int)ippdrv, ipp_id);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001681
Andrzej Hajda31646052014-05-19 12:54:05 +02001682 ippdrv->prop_list.ipp_id = ipp_id;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001683
1684 /* store parent device for node */
1685 ippdrv->parent_dev = dev;
1686
1687 /* store event work queue and handler */
1688 ippdrv->event_workq = ctx->event_workq;
1689 ippdrv->sched_event = ipp_sched_event;
1690 INIT_LIST_HEAD(&ippdrv->cmd_list);
Eunchul Kimc12e2612012-12-14 17:58:54 +09001691
1692 if (is_drm_iommu_supported(drm_dev)) {
1693 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1694 if (ret) {
1695 DRM_ERROR("failed to activate iommu\n");
1696 goto err_iommu;
1697 }
1698 }
Eunchul Kimcb471f142012-12-14 18:10:31 +09001699 }
1700
1701 return 0;
1702
Eunchul Kimc12e2612012-12-14 17:58:54 +09001703err_iommu:
1704 /* get ipp driver entry */
1705 list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
1706 if (is_drm_iommu_supported(drm_dev))
1707 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1708
Eunchul Kimcb471f142012-12-14 18:10:31 +09001709err_idr:
Eunchul Kimcb471f142012-12-14 18:10:31 +09001710 idr_destroy(&ctx->ipp_idr);
1711 idr_destroy(&ctx->prop_idr);
1712 return ret;
1713}
1714
1715static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1716{
1717 struct exynos_drm_ippdrv *ippdrv;
1718
Eunchul Kimcb471f142012-12-14 18:10:31 +09001719 /* get ipp driver entry */
1720 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
Eunchul Kimc12e2612012-12-14 17:58:54 +09001721 if (is_drm_iommu_supported(drm_dev))
1722 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1723
Eunchul Kimcb471f142012-12-14 18:10:31 +09001724 ippdrv->drm_dev = NULL;
1725 exynos_drm_ippdrv_unregister(ippdrv);
1726 }
1727}
1728
1729static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1730 struct drm_file *file)
1731{
1732 struct drm_exynos_file_private *file_priv = file->driver_priv;
1733 struct exynos_drm_ipp_private *priv;
1734
Eunchul Kimcb471f142012-12-14 18:10:31 +09001735 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
Sachin Kamat38bb5252013-08-19 19:04:55 +09001736 if (!priv)
Eunchul Kimcb471f142012-12-14 18:10:31 +09001737 return -ENOMEM;
Eunchul Kimcb471f142012-12-14 18:10:31 +09001738 priv->dev = dev;
1739 file_priv->ipp_priv = priv;
1740
1741 INIT_LIST_HEAD(&priv->event_list);
1742
YoungJun Chocbc4c332013-06-12 10:44:40 +09001743 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
Eunchul Kimcb471f142012-12-14 18:10:31 +09001744
1745 return 0;
1746}
1747
/*
 * ipp_subdrv_close - per-file close hook.
 *
 * Walks every driver's command list and tears down the command nodes
 * owned by this file.  A node still in START state means the process
 * died (or closed the fd) without issuing IPP_CTRL_STOP, so the stop is
 * forced here before the node is cleaned up; the driver's runtime-PM
 * reference is dropped once its command list is empty.
 */
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		/* _safe iterator: ipp_clean_cmd_node unlinks the node */
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			/* only touch nodes created through this file */
			if (c_node->priv == priv) {
				/*
				 * userland goto unnormal state. process killed.
				 * and close the file.
				 * so, IPP didn't called stop cmd ctrl.
				 * so, we are make stop operation in this state.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

	kfree(priv);
	return;
}
1789
/*
 * ipp_probe - platform driver probe for the virtual IPP device.
 *
 * Sets up the shared context (idr tables and their locks), creates the
 * single-threaded event and command workqueues, and registers the IPP
 * subdriver with the exynos DRM core.  Resources are released in
 * reverse order on failure via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	/* devm allocation: freed automatically on probe failure/remove */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * create single thread for ipp event
	 * IPP supports event thread for IPP drivers.
	 * IPP driver send event_work to this thread.
	 * and IPP event thread send event to user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * create single thread for ipp command
	 * IPP supports command thread for user process.
	 * user process make command node using set property ioctl.
	 * and make start_work and send this work to command thread.
	 * and then this command thread start property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver informations */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}
1859
/*
 * ipp_remove - platform driver remove: tear down everything created in
 * ipp_probe() in reverse order (subdriver, idr tables, locks,
 * workqueues).  The ctx itself is devm-managed and freed automatically.
 */
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}
1880
/*
 * ipp_power_ctrl - power control stub.
 *
 * Real power/clock handling is still TODO (see item 7 in the TODO list
 * at the top of this file); currently this only logs the request and
 * reports success.
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}
1887
1888#ifdef CONFIG_PM_SLEEP
1889static int ipp_suspend(struct device *dev)
1890{
1891 struct ipp_context *ctx = get_ipp_context(dev);
1892
Eunchul Kimcb471f142012-12-14 18:10:31 +09001893 if (pm_runtime_suspended(dev))
1894 return 0;
1895
1896 return ipp_power_ctrl(ctx, false);
1897}
1898
1899static int ipp_resume(struct device *dev)
1900{
1901 struct ipp_context *ctx = get_ipp_context(dev);
1902
Eunchul Kimcb471f142012-12-14 18:10:31 +09001903 if (!pm_runtime_suspended(dev))
1904 return ipp_power_ctrl(ctx, true);
1905
1906 return 0;
1907}
1908#endif
1909
1910#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM suspend callback: power the IPP block down. */
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}
1917
/* Runtime-PM resume callback: power the IPP block back up. */
static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
1924#endif
1925
/* System-sleep and runtime PM callbacks for the IPP platform device. */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
1930
/* Platform-driver glue for the "exynos-drm-ipp" virtual device. */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};
1940