/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports the image
 * scaler/rotator and input/output DMA operations of hardware such as
 * FIMC, GSC and Rotator.
 * IPP is the integration device driver for h/w blocks that share
 * these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event ids if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multiple opens.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

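/*
 * ipp_create_id - allocate an id for @obj under @lock and return it
 * via @idp. idr_alloc() is called with start 1 and end 0, i.e. ids
 * are handed out from 1 upwards without an upper bound, so an id of
 * 0 can safely be treated as "not allocated" elsewhere.
 */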
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

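/*
 * ipp_find_obj - resolve an id back to the object stored by
 * ipp_create_id(). Note that this never returns NULL: a failed
 * lookup returns ERR_PTR(-ENODEV), so callers must check the result
 * with IS_ERR().
 */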
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd	cmd)
{
	/*
	 * check the dedicated flag, and for WB/OUTPUT operations
	 * whether the device is already powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

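/*
 * ipp_find_driver - select an ipp driver for @property. A non-zero
 * ipp_id is resolved directly through the idr; with ipp_id == 0 the
 * whole driver list is scanned for a driver that is neither
 * dedicated to another client nor rejecting the property.
 */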
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so mark the driver dedicated in the set property ioctl
		 * and clear the flag again once the driver has finished.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device, because
		 * ipp drivers have different abilities, so the property
		 * has to be checked against the chosen driver.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id, so search
		 * the whole driver list for a driver that fits.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("%s:used device.\n", __func__);
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("%s:not support property.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

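/*
 * ipp_find_drv_by_handle - find the driver that owns the command
 * node registered under @prop_id by walking each driver's cmd_list.
 * Used by paths that only carry a property handle, e.g. queue buf
 * and command control.
 */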
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search for the ipp driver that owns the command node with
	 * this prop_id handle. Some paths only have the prop_id,
	 * e.g. the PAUSE state, queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
			count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

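/*
 * exynos_drm_ipp_get_property - ioctl entry point for capability
 * queries. Userspace calls it twice: first with ipp_id == 0 to read
 * the number of registered drivers from prop_list->count, then once
 * per driver with a concrete ipp_id to fetch that driver's
 * capability list.
 */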
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Report the ippdrv list count to the user application.
		 * In the first step the user application reads this
		 * count, in the second step it queries each driver's
		 * capability using its ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Return the capability of the driver with this ipp_id.
		 * Some devices do not support the wb or output interface,
		 * so the user application can detect the correct ipp
		 * driver using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -EINVAL;
		}

		prop_list = ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list using
	 * prop_id, and store the new property information in that
	 * command node.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
				__func__, property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

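/*
 * exynos_drm_ipp_set_property - ioctl entry point for property
 * setup. A prop_id of 0 allocates a new command node plus id; a
 * non-zero prop_id (e.g. when resuming from PAUSE) only updates the
 * property of the already existing, stopped command node.
 */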
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/* log the property that the user application set */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id.
	 * But if a prop_id was already assigned by an earlier set
	 * property call, e.g. in the PAUSE state, find the current
	 * prop_id and use it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate map node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		__func__, property->prop_id, property->cmd, (int)ippdrv);

	/* stored property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR_OR_NULL(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = -ENOMEM;
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR_OR_NULL(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = -ENOMEM;
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR_OR_NULL(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = -ENOMEM;
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

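/*
 * ipp_check_mem_list - count the queued buffers per direction. M2M
 * needs a src/dst pair, so the minimum of both counts is returned;
 * writeback and output use a single direction, so the maximum is
 * returned. A result of 0 means there is nothing to process yet.
 */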
static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
				i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired memory addresses, so check the
	 * minimum count of src and dst. The other cases do not use
	 * paired memory, so use the maximum count.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
			__func__, count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

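/*
 * ipp_get_mem_node - build a memory node for a queue buf request:
 * translate each plane's gem handle into a dma address and link the
 * node onto the command node's per-direction mem_list.
 */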
static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
		(int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
		qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
			i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
				__func__, i, buf_info.base[i],
				(int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
				m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

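/*
 * ipp_get_event - allocate the completion event that will be
 * delivered to userspace once this destination buffer has been
 * processed. The event is parked on the command node's event_list
 * until ipp_send_event() moves it to the drm file's event list.
 */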
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		/*
		 * qbuf == NULL means delete all events, which is what
		 * the stop operation wants. Otherwise delete only the
		 * event with the same buffer id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

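/*
 * ipp_queue_buf_with_run - react to a freshly enqueued buffer on a
 * running command: for M2M the start work is queued so the transfer
 * is driven from queue buf, while writeback/output only program the
 * new buffer address into the hardware.
 */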
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return -EFAULT;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step is to get an event for the destination
		 * buffer. The second step, in the M2M case, is to run
		 * with the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature; the other cases set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

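/*
 * exynos_drm_ipp_check_valid - state machine guard for cmd_ctrl:
 * PLAY is only allowed from IDLE, STOP from any state but STOP,
 * PAUSE only from START and RESUME only from STOP. Everything
 * except PLAY additionally requires the device not to be runtime
 * suspended.
 */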
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
		break;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return -EINVAL;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		c_node->state = IPP_STATE_START;
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transf.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
					__func__);
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

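/*
 * ipp_sched_cmd - command work handler, run on the single threaded
 * "ipp_cmd" workqueue. PLAY/RESUME start the property and, for M2M,
 * wait for the transfer to complete; STOP/PAUSE tear the property
 * down and signal stop_complete to the waiting ioctl.
 */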
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case handles a single unit operation with
		 * multiple queues, so it has to wait here for the
		 * completion of the data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

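/*
 * ipp_send_event - dequeue the finished buffer(s) according to the
 * command type, stamp the oldest pending event with the current
 * time, prop_id and buffer ids, and move it to the drm file so that
 * a blocked poll/read in userspace wakes up.
 */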
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node holds the event list for the destination
	 * buffers. When a destination buffer is enqueued to the mem
	 * list, an event is created and linked to the tail of the
	 * event list, so the first event belongs to the first
	 * enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
		__func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
		event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	mutex_lock(&c_node->event_lock);

	/*
	 * IPP synchronizes the command and event threads. If userland
	 * closes IPP right away, the command thread may already be
	 * waiting, so complete the start completion even when the
	 * event is bypassed.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
			__func__, c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

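/*
 * ipp_subdrv_probe - called once the drm device is up: hand every
 * registered ipp driver its drm_dev pointer, an ipp_id from the idr,
 * the shared event workqueue and, where supported, an attached
 * iommu.
 */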
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	DRM_DEBUG_KMS("%s\n", __func__);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * If userland went into an abnormal
				 * state, e.g. the process was killed and
				 * the file closed, the stop cmd ctrl was
				 * never called, so perform the stop
				 * operation here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * create a single thread for ipp events.
	 * IPP supports an event thread for the IPP drivers:
	 * an IPP driver sends its event_work to this thread,
	 * and the event thread sends the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * create a single thread for ipp commands.
	 * IPP supports a command thread for the user process:
	 * the user process creates a command node with the set
	 * property ioctl and sends start_work to this thread,
	 * which then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(&pdev->dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};