/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include "drmP.h"
#include "i915_drv.h"
#include <linux/dma-buf.h>

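/*
 * dma-buf (PRIME) support: exporting i915 GEM objects as dma-bufs so
 * that other devices can attach to and map them, and importing foreign
 * dma-bufs as GEM objects.
 */

/*
 * Build a scatterlist of the object's backing pages, DMA-map it for
 * the attached device and pin the pages so they stay resident while
 * shared.  Note that the return value of dma_map_sg() is not checked
 * here.
 */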
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct drm_device *dev = obj->base.dev;
	int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *sg;
	int ret;
	int nents;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		sg = ERR_PTR(ret);
		goto out;
	}

	/* link the pages into a scatterlist, then DMA-map it */
	sg = drm_prime_pages_to_sg(obj->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

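/*
 * Undo i915_gem_map_dma_buf(): DMA-unmap the scatterlist and free it.
 */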
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

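/*
 * Called when the last reference to the dma-buf itself goes away:
 * release the GEM reference that the exported fd was holding.
 */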
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}

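/*
 * Map the whole object into the kernel's address space for CPU
 * access.  The mapping is reference counted, so repeated vmap calls
 * share a single vmap() of the backing pages.
 */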
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}

	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
	if (!obj->dma_buf_vmapping) {
		DRM_ERROR("failed to vmap object\n");
		goto out_unlock;
	}

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;
}

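/*
 * Drop one vmap reference; on the last one, tear down the mapping and
 * unpin the backing pages.
 */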
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

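/*
 * Per-page CPU access through the kmap interfaces is not implemented;
 * these hooks are stubs.  Importers that need CPU access have to go
 * through vmap instead.
 */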
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

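/* Direct userspace mmap of the dma-buf is not supported. */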
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

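/*
 * Prepare the object for CPU access by the importer: move it to the
 * CPU domain, flushing or invalidating caches as needed.  The access
 * counts as a write for DMA_TO_DEVICE and DMA_BIDIRECTIONAL, since in
 * those cases the CPU may produce data for the device to read.
 */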
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

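/* The dma-buf operations vector handed to dma_buf_export() below. */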
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = i915_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

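/*
 * i915_gem_prime_export - wrap a GEM object in a dma-buf for sharing.
 *
 * Called by the DRM PRIME layer when userspace turns a GEM handle
 * into a shareable fd.  A rough sketch of the userspace side (the
 * drm_fd/handle names are placeholders):
 *
 *	struct drm_prime_handle args = { .handle = handle, .flags = 0 };
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	args.fd then refers to the exported dma-buf.
 */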
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops,
			      obj->base.size, 0600);
}

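/*
 * i915_gem_prime_import - create a GEM object backed by a dma-buf
 * (the DRM_IOCTL_PRIME_FD_TO_HANDLE path).  Importing one of our own
 * dma-bufs is short-circuited to a reference on the original object;
 * anything else is attached to and mapped for our device.
 */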
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct drm_i915_gem_object *obj;
	int npages;
	int size;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	size = dma_buf->size;
	npages = size / PAGE_SIZE;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, size);
	if (ret) {
		kfree(obj);
		goto fail_unmap;
	}

	obj->sg_table = sg;
	obj->base.import_attach = attach;

	return &obj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}