blob: fd76449cf452eb5bd0c90fda808bdb6543e7f477 [file] [log] [blame]
/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
11
David Howells760285e2012-10-02 18:01:07 +010012#include <drm/drmP.h>
13#include <drm/exynos_drm.h>
Inki Daeb2df26c2012-04-23 21:01:28 +090014#include "exynos_drm_drv.h"
15#include "exynos_drm_gem.h"
16
17#include <linux/dma-buf.h>
18
/*
 * Per-attachment state for buffers exported by this driver.
 *
 * @sgt: scatter/gather table handed to the importer; cloned from the GEM
 *       buffer's own table in exynos_gem_map_dma_buf().
 * @dir: DMA direction @sgt was mapped with; DMA_NONE until a mapping
 *       (or a table-only, unmapped clone) has been made.
 * @is_mapped: true once @sgt has been populated, so repeat map calls with
 *             the same direction can return the cached table.
 */
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
24
Inki Daed0ed8d22013-08-15 00:02:31 +020025static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
26{
27 return to_exynos_gem_obj(buf->priv);
28}
29
Inki Daea7b362f2012-11-28 19:09:31 +090030static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
31 struct device *dev,
32 struct dma_buf_attachment *attach)
Inki Daeb2df26c2012-04-23 21:01:28 +090033{
Inki Daea7b362f2012-11-28 19:09:31 +090034 struct exynos_drm_dmabuf_attachment *exynos_attach;
Inki Daeb2df26c2012-04-23 21:01:28 +090035
Inki Daea7b362f2012-11-28 19:09:31 +090036 exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
37 if (!exynos_attach)
38 return -ENOMEM;
Inki Daeb2df26c2012-04-23 21:01:28 +090039
Inki Daea7b362f2012-11-28 19:09:31 +090040 exynos_attach->dir = DMA_NONE;
41 attach->priv = exynos_attach;
Inki Daeb2df26c2012-04-23 21:01:28 +090042
Inki Daea7b362f2012-11-28 19:09:31 +090043 return 0;
44}
Inki Daeb2df26c2012-04-23 21:01:28 +090045
Inki Daea7b362f2012-11-28 19:09:31 +090046static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
47 struct dma_buf_attachment *attach)
48{
49 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
50 struct sg_table *sgt;
51
52 if (!exynos_attach)
53 return;
54
55 sgt = &exynos_attach->sgt;
56
57 if (exynos_attach->dir != DMA_NONE)
58 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
59 exynos_attach->dir);
60
61 sg_free_table(sgt);
62 kfree(exynos_attach);
63 attach->priv = NULL;
Inki Daeb2df26c2012-04-23 21:01:28 +090064}
65
66static struct sg_table *
67 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
68 enum dma_data_direction dir)
69{
Inki Daea7b362f2012-11-28 19:09:31 +090070 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
Inki Daed0ed8d22013-08-15 00:02:31 +020071 struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
Inki Daeb2df26c2012-04-23 21:01:28 +090072 struct drm_device *dev = gem_obj->base.dev;
73 struct exynos_drm_gem_buf *buf;
Inki Daea7b362f2012-11-28 19:09:31 +090074 struct scatterlist *rd, *wr;
Inki Daeb2df26c2012-04-23 21:01:28 +090075 struct sg_table *sgt = NULL;
Inki Daea7b362f2012-11-28 19:09:31 +090076 unsigned int i;
77 int nents, ret;
Inki Daeb2df26c2012-04-23 21:01:28 +090078
Inki Daea7b362f2012-11-28 19:09:31 +090079 /* just return current sgt if already requested. */
Inki Daeb8b5c132013-01-11 13:46:58 +090080 if (exynos_attach->dir == dir && exynos_attach->is_mapped)
Inki Daea7b362f2012-11-28 19:09:31 +090081 return &exynos_attach->sgt;
82
Inki Dae0519f9a2012-10-20 07:53:42 -070083 buf = gem_obj->buffer;
84 if (!buf) {
85 DRM_ERROR("buffer is null.\n");
Inki Daea7b362f2012-11-28 19:09:31 +090086 return ERR_PTR(-ENOMEM);
87 }
88
89 sgt = &exynos_attach->sgt;
90
91 ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
92 if (ret) {
93 DRM_ERROR("failed to alloc sgt.\n");
94 return ERR_PTR(-ENOMEM);
Inki Dae0519f9a2012-10-20 07:53:42 -070095 }
96
Inki Daeb2df26c2012-04-23 21:01:28 +090097 mutex_lock(&dev->struct_mutex);
98
Inki Daea7b362f2012-11-28 19:09:31 +090099 rd = buf->sgt->sgl;
100 wr = sgt->sgl;
101 for (i = 0; i < sgt->orig_nents; ++i) {
102 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
103 rd = sg_next(rd);
104 wr = sg_next(wr);
105 }
Inki Daeb2df26c2012-04-23 21:01:28 +0900106
Inki Daeb8b5c132013-01-11 13:46:58 +0900107 if (dir != DMA_NONE) {
108 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
109 if (!nents) {
110 DRM_ERROR("failed to map sgl with iommu.\n");
111 sg_free_table(sgt);
112 sgt = ERR_PTR(-EIO);
113 goto err_unlock;
114 }
Inki Dae0519f9a2012-10-20 07:53:42 -0700115 }
Inki Daeb2df26c2012-04-23 21:01:28 +0900116
Inki Daeb8b5c132013-01-11 13:46:58 +0900117 exynos_attach->is_mapped = true;
Inki Daea7b362f2012-11-28 19:09:31 +0900118 exynos_attach->dir = dir;
119 attach->priv = exynos_attach;
120
Prathyush K465ed662012-11-20 19:32:56 +0900121 DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
Inki Daeb2df26c2012-04-23 21:01:28 +0900122
123err_unlock:
124 mutex_unlock(&dev->struct_mutex);
125 return sgt;
126}
127
/*
 * Intentionally a no-op: the attachment keeps its mapping cached until
 * exynos_gem_detach_dma_buf() tears everything down.
 */
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}
134
/* CPU atomic kmap is not implemented; always returns NULL. */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}
142
/* Counterpart of the unimplemented kmap_atomic; nothing to undo. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}
149
/* CPU kmap is not implemented; always returns NULL. */
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}
157
/* Counterpart of the unimplemented kmap; nothing to undo. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}
163
/* Userspace mmap of the dma-buf is not supported by this exporter. */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}
169
/* dma-buf exporter callbacks; kmap/mmap entries are stubs (see above). */
static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	/* Generic helper drops the GEM reference taken at export time. */
	.release		= drm_gem_dmabuf_release,
};
182
183struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
184 struct drm_gem_object *obj, int flags)
185{
186 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
187
Inki Daed0ed8d22013-08-15 00:02:31 +0200188 return dma_buf_export(obj, &exynos_dmabuf_ops,
Seung-Woo Kimf4fd9bd2012-12-20 16:39:35 +0900189 exynos_gem_obj->base.size, flags);
Inki Daeb2df26c2012-04-23 21:01:28 +0900190}
191
192struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
193 struct dma_buf *dma_buf)
194{
195 struct dma_buf_attachment *attach;
196 struct sg_table *sgt;
197 struct scatterlist *sgl;
198 struct exynos_drm_gem_obj *exynos_gem_obj;
199 struct exynos_drm_gem_buf *buffer;
Inki Dae47fcdce2012-06-07 16:15:07 +0900200 int ret;
Inki Daeb2df26c2012-04-23 21:01:28 +0900201
Inki Daeb2df26c2012-04-23 21:01:28 +0900202 /* is this one of own objects? */
203 if (dma_buf->ops == &exynos_dmabuf_ops) {
204 struct drm_gem_object *obj;
205
Inki Daed0ed8d22013-08-15 00:02:31 +0200206 obj = dma_buf->priv;
Inki Daeb2df26c2012-04-23 21:01:28 +0900207
208 /* is it from our device? */
209 if (obj->dev == drm_dev) {
Seung-Woo Kimbe8a42a2012-09-27 15:30:06 +0900210 /*
211 * Importing dmabuf exported from out own gem increases
212 * refcount on gem itself instead of f_count of dmabuf.
213 */
Inki Daeb2df26c2012-04-23 21:01:28 +0900214 drm_gem_object_reference(obj);
215 return obj;
216 }
217 }
218
219 attach = dma_buf_attach(dma_buf, drm_dev->dev);
220 if (IS_ERR(attach))
221 return ERR_PTR(-EINVAL);
222
Imre Deak011c22822013-04-19 11:11:56 +1000223 get_dma_buf(dma_buf);
Inki Daeb2df26c2012-04-23 21:01:28 +0900224
225 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
Subash Patel0dd3b722012-06-25 11:22:57 -0700226 if (IS_ERR_OR_NULL(sgt)) {
Inki Daeb2df26c2012-04-23 21:01:28 +0900227 ret = PTR_ERR(sgt);
228 goto err_buf_detach;
229 }
230
231 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
232 if (!buffer) {
233 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
234 ret = -ENOMEM;
235 goto err_unmap_attach;
236 }
237
Inki Dae0519f9a2012-10-20 07:53:42 -0700238 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
239 if (!exynos_gem_obj) {
Inki Daeb2df26c2012-04-23 21:01:28 +0900240 ret = -ENOMEM;
241 goto err_free_buffer;
242 }
243
Inki Daeb2df26c2012-04-23 21:01:28 +0900244 sgl = sgt->sgl;
Inki Daeb2df26c2012-04-23 21:01:28 +0900245
Inki Dae0519f9a2012-10-20 07:53:42 -0700246 buffer->size = dma_buf->size;
247 buffer->dma_addr = sg_dma_address(sgl);
Inki Dae47fcdce2012-06-07 16:15:07 +0900248
Inki Dae0519f9a2012-10-20 07:53:42 -0700249 if (sgt->nents == 1) {
Inki Dae47fcdce2012-06-07 16:15:07 +0900250 /* always physically continuous memory if sgt->nents is 1. */
251 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
252 } else {
Inki Dae0519f9a2012-10-20 07:53:42 -0700253 /*
254 * this case could be CONTIG or NONCONTIG type but for now
255 * sets NONCONTIG.
256 * TODO. we have to find a way that exporter can notify
257 * the type of its own buffer to importer.
258 */
Inki Dae47fcdce2012-06-07 16:15:07 +0900259 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
Inki Daeb2df26c2012-04-23 21:01:28 +0900260 }
261
262 exynos_gem_obj->buffer = buffer;
263 buffer->sgt = sgt;
264 exynos_gem_obj->base.import_attach = attach;
265
266 DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
267 buffer->size);
268
269 return &exynos_gem_obj->base;
270
Inki Daeb2df26c2012-04-23 21:01:28 +0900271err_free_buffer:
272 kfree(buffer);
273 buffer = NULL;
274err_unmap_attach:
275 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
276err_buf_detach:
277 dma_buf_detach(dma_buf, attach);
Imre Deak011c22822013-04-19 11:11:56 +1000278 dma_buf_put(dma_buf);
279
Inki Daeb2df26c2012-04-23 21:01:28 +0900280 return ERR_PTR(ret);
281}
282
/* Module metadata. */
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");