/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}
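
/*
 * Destroy a nouveau GEM object: drop any leftover pin reference, release
 * the backing TTM buffer object and free the GEM wrapper itself.
 */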
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;

	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}
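
/*
 * Allocate a new buffer object, with TTM placement derived from the
 * requested GEM domains, and wrap it in a GEM object.
 */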
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t domain, uint32_t tile_mode,
		uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
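
/* Fill in a GEM_INFO reply from the buffer object's current state. */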
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
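
/* GEM_NEW ioctl: create a buffer object and return a handle to it. */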
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	if (req->channel_hint) {
		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
		if (IS_ERR(chan))
			return PTR_ERR(chan);
	}

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (chan)
		nouveau_channel_put(&chan);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
out:
	return ret;
}
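
/*
 * Pick TTM placement flags from the domains userspace asked for,
 * preferring the memory type the buffer already resides in.
 */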
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};
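
/* Fence, unreserve and unreference every buffer on a validation list. */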
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}
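
/*
 * Look up and reserve every buffer on the submission's validation list,
 * backing off and retrying when a reservation conflict is hit.
 */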
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}
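
/*
 * Validate each reserved buffer into an allowed placement, syncing the
 * channel with the buffer's previous fence, and copy any changed
 * presumed offsets back to userspace.
 */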
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		ret = nouveau_bo_validate(nvbo, true, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (nvbo->bo.offset == b->presumed.offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed.offset = nvbo->bo.offset;
		b->presumed.valid = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
				     &b->presumed, sizeof(b->presumed)))
			return -EFAULT;
	}

	return relocs;
}
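
/* Reserve and validate all buffers referenced by a pushbuf submission. */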
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}
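
/* Copy a userspace array into a freshly allocated kernel buffer. */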
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
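
/*
 * Apply userspace-supplied relocations: patch presumed offsets into the
 * push buffers, waiting for each target buffer to go idle before writing.
 */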
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}
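
/*
 * GEM_PUSHBUF ioctl: copy in the push, buffer and reloc arrays, validate
 * the buffer list, apply relocations and submit the pushes to the
 * channel, fencing the whole submission.
 */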
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Mark push buffers as being used on PFIFO, the validation code
	 * will then make sure that if the pushbuf bo moves, that they
	 * happen on the kernel channel, which will in turn cause a sync
	 * to happen before we try and submit the push buffer.
	 */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out;
		}

		bo[push[i].bo_index].read_domains |= (1 << 31);
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}
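
/* Translate GEM domain flags into the corresponding TTM placement flags. */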
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}
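
/*
 * GEM_CPU_PREP ioctl: wait for the buffer to go idle before CPU access,
 * or just report whether it is busy when NOWAIT is set.
 */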
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
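
/* GEM_CPU_FINI ioctl: currently a no-op. */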
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}
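
/* GEM_INFO ioctl: report a buffer's placement, size, offset and tiling. */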
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}