/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include "omap_drv.h"

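/*
 * Export path: the dma_buf_ops below let other drivers (and userspace, via
 * PRIME fds) attach to an omapdrm GEM object.  map_dma_buf pins the object
 * to a single physically contiguous region and hands the importer a
 * one-entry scatterlist, since importers such as camera blocks expect
 * contiguous memory.
 */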
static struct sg_table *omap_gem_map_dma_buf(
                struct dma_buf_attachment *attachment,
                enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attachment->dmabuf->priv;
        struct sg_table *sg;
        dma_addr_t paddr;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);

        /* camera, etc, need physically contiguous.. but we need a
         * better way to know this..
         */
        ret = omap_gem_get_paddr(obj, &paddr, true);
        if (ret)
                goto out;

        ret = sg_alloc_table(sg, 1, GFP_KERNEL);
        if (ret)
                goto out;

        sg_init_table(sg->sgl, 1);
        sg_dma_len(sg->sgl) = obj->size;
        sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
        sg_dma_address(sg->sgl) = paddr;

        /* this should be after _get_paddr() to ensure we have pages attached */
        omap_gem_dma_sync(obj, dir);

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}

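/*
 * Undo map_dma_buf: drop the pin taken via omap_gem_get_paddr() and free
 * the scatterlist that was handed to the importer.
 */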
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                struct sg_table *sg, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attachment->dmabuf->priv;
        omap_gem_put_paddr(obj);
        sg_free_table(sg);
        kfree(sg);
}

static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
        struct drm_gem_object *obj = buffer->priv;
        /* release reference that was taken when dmabuf was exported
         * in omap_gem_prime_set()..
         */
        drm_gem_object_unreference_unlocked(obj);
}

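/*
 * CPU access hooks: begin_cpu_access makes sure backing pages exist before
 * the importer touches the buffer with the CPU, and end_cpu_access releases
 * them again.  Tiled (TILER) buffers are rejected here since they would
 * need a de-tiled view.
 */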
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
                enum dma_data_direction dir)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;
        if (omap_gem_flags(obj) & OMAP_BO_TILED) {
                /* TODO we would need to pin at least part of the buffer to
                 * get de-tiled view.  For now just reject it.
                 */
                return -ENOMEM;
        }
        /* make sure we have the pages: */
        return omap_gem_get_pages(obj, &pages, true);
}

static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
                enum dma_data_direction dir)
{
        struct drm_gem_object *obj = buffer->priv;
        omap_gem_put_pages(obj);
}

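/*
 * Per-page kernel mappings: each kmap helper looks up the page array, syncs
 * the page for CPU access and maps it; the kunmap helpers tear the mapping
 * down again.
 */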
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
                unsigned long page_num)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;
        omap_gem_get_pages(obj, &pages, false);
        omap_gem_cpu_sync(obj, page_num);
        return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
                unsigned long page_num, void *addr)
{
        kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
                unsigned long page_num)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;
        omap_gem_get_pages(obj, &pages, false);
        omap_gem_cpu_sync(obj, page_num);
        return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
                unsigned long page_num, void *addr)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;
        omap_gem_get_pages(obj, &pages, false);
        kunmap(pages[page_num]);
}

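/*
 * mmap of the exported dma_buf: reuse the regular GEM mmap path so the
 * importer gets the same mapping and fault handling as a native GEM mmap.
 */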
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
                struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = buffer->priv;
        int ret = 0;

        if (WARN_ON(!obj->filp))
                return -EINVAL;

        ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
        if (ret < 0)
                return ret;

        return omap_gem_mmap_obj(obj, vma);
}

static struct dma_buf_ops omap_dmabuf_ops = {
        .map_dma_buf = omap_gem_map_dma_buf,
        .unmap_dma_buf = omap_gem_unmap_dma_buf,
        .release = omap_gem_dmabuf_release,
        .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
        .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
        .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
        .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
        .kmap = omap_gem_dmabuf_kmap,
        .kunmap = omap_gem_dmabuf_kunmap,
        .mmap = omap_gem_dmabuf_mmap,
};

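/*
 * PRIME export: wrap the GEM object in a dma_buf using the ops above.  The
 * reference taken on the object at export time is dropped again in
 * omap_gem_dmabuf_release().
 */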
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
                struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &omap_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return dma_buf_export(&exp_info);
}

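/*
 * PRIME import: only self-import is handled, i.e. a dma_buf that was
 * exported by this driver is unwrapped back to the underlying GEM object
 * rather than re-imported through a scatterlist.
 */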
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                struct dma_buf *buffer)
{
        struct drm_gem_object *obj;

        /* is this one of our own objects? */
        if (buffer->ops == &omap_dmabuf_ops) {
                obj = buffer->priv;
                /* is it from our device? */
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        /*
         * TODO add support for importing buffers from other devices..
         * for now we don't need this but would be nice to add eventually
         */
        return ERR_PTR(-EINVAL);
}
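
/*
 * Illustrative sketch (not part of this file): the export/import hooks
 * above are typically wired into the driver's struct drm_driver together
 * with the core PRIME helpers, roughly like:
 *
 *      static struct drm_driver omap_drm_driver = {
 *              ...
 *              .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export = omap_gem_prime_export,
 *              .gem_prime_import = omap_gem_prime_import,
 *              ...
 *      };
 *
 * Field names and helper availability vary with kernel version; see
 * omap_drv.c for the actual hookup.
 */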