/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

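/*
 * Note: radeon is not expected to create GEM objects through this
 * generic driver hook; everything goes through radeon_gem_object_create()
 * below, so reaching this path indicates a driver bug.
 */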
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

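/*
 * Drop the radeon_bo reference that backs the GEM object. Called once
 * the last GEM reference goes away; the bo itself is freed by TTM once
 * its own refcount reaches zero.
 */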
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_bo_unref(&robj);
	}
}

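/*
 * Allocate a radeon_bo and return the GEM object embedded in it. The
 * alignment is clamped up to at least one page, and the new bo is added
 * to rdev->gem.objects so it can be found (and force-deleted) later.
 * -ERESTARTSYS is passed through silently since it only means the
 * allocation was interrupted by a signal.
 */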
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

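/*
 * Validate a buffer into the read or write domain userspace asked for.
 * Currently only the CPU domain does anything: it waits for the bo to
 * become idle so the CPU can safely access it.
 */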
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without a domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

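/*
 * Per-device GEM state is just the list of allocated objects;
 * radeon_gem_fini() below force-deletes anything still on that list
 * at teardown.
 */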
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used by both the new and
 * the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}

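/*
 * Called on GEM handle close: for chips with a GPU virtual address
 * space (Cayman and later), tear down any mapping this file's VM still
 * holds on the buffer.
 */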
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va, *tmp;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	if (radeon_bo_reserve(rbo, false)) {
		return;
	}
	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			/* remove from this vm address space */
			mutex_lock(&vm->mutex);
			list_del(&bo_va->vm_list);
			mutex_unlock(&vm->mutex);
			list_del(&bo_va->bo_list);
			kfree(bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

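/*
 * -EDEADLK from a wait is treated as a detected GPU lockup: take the
 * CS mutex, try a GPU reset, and if the reset succeeds return -EAGAIN
 * so userspace retries the ioctl.
 */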
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		radeon_mutex_lock(&rdev->cs_mutex);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		radeon_mutex_unlock(&rdev->cs_mutex);
	}
	return r;
}

/*
 * GEM ioctls.
 */
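/*
 * Report VRAM/GTT sizes to userspace, minus what the kernel has already
 * claimed (stolen VGA memory, fbdev buffers, the IB pool and the rings).
 */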
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

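/*
 * Allocate a buffer for userspace: round the size up to a whole number
 * of pages, create the bo, then convert it to a handle. The GEM
 * reference taken at allocation is dropped once the handle exists, so
 * the handle is what keeps the object alive from here on.
 */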
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

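/*
 * Non-blocking (no_wait) idle check; also reports which TTM placement
 * the buffer currently lives in, translated back to a GEM domain.
 */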
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		/* fall through */
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

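/*
 * Block until the buffer is idle, then give the ASIC a chance to do
 * any hw-specific post-wait work through its ioctl_wait_idle callback.
 */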
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call the hw-specific callback, if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

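/*
 * Map or unmap a buffer in a per-file GPU virtual address space
 * (Cayman+ only). Every failure path reports RADEON_VA_RESULT_ERROR
 * back through args->operation so userspace gets a reason even when the
 * ioctl itself returns an errno.
 */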
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-0 value; that way
	 * we can start using this field later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped
	 * flag, otherwise we will end up with broken userspace and we
	 * won't be able to enable this feature without adding a new
	 * interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	switch (args->operation) {
	case RADEON_VA_MAP:
		bo_va = radeon_bo_va(rbo, &fpriv->vm);
		if (bo_va) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
				     args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

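/*
 * Dumb buffers for generic scanout: compute a hw-friendly pitch,
 * page-align the size, and allocate the bo in VRAM before handing a
 * handle back to userspace.
 */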
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}