/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, so to
 * make sure things don't go berserk you have to access this pointer
 * while holding the global lru lock, and make sure that anytime you
 * free a node you reset the pointer to NULL.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
				       struct ttm_mem_type_manager *man)
{
	printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
	printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
	printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
	printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
	printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
	printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
	printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
	printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
		man->available_caching);
	printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
		man->default_caching);
	spin_lock(&glob->lru_lock);
	drm_mm_debug_table(&man->manager, TTM_PFX);
	spin_unlock(&glob->lru_lock);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_type_manager *man;
	int i, ret, mem_type;

	printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		man = &bdev->man[mem_type];
		printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_manager_debug(glob, man);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{

	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return ret;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}

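/**
 * Put a reserved, non-pinned buffer back on its memory type's LRU list
 * (and on the global swap LRU if it is backed by a ttm_tt), taking one
 * list_kref reference for each list it is added to.
 */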
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

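/**
 * Move the buffer to the memory described by @mem: unmap CPU mappings
 * that would become stale, create and bind a ttm_tt if the new memory
 * type needs one, then transfer the contents with ttm_bo_move_ttm(),
 * the driver's move callback, or the memcpy fallback.
 */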
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}

	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			bo->mem.mm_node->private = NULL;
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&glob->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&glob->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&glob->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		spin_lock(&glob->lru_lock);
		if (evict_mem.mm_node) {
			evict_mem.mm_node->private = NULL;
			drm_mm_put_block(evict_mem.mm_node);
			evict_mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

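/**
 * Reserve and evict the least-recently-used buffer of memory type
 * @mem_type, dropping the LRU references once it has been moved out.
 */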
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

	spin_lock(&glob->lru_lock);
	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);
	if (unlikely(ret != 0))
		return ret;
	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	ret = ttm_bo_evict(bo, interruptible, no_wait);
	ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

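/**
 * Allocate a drm_mm node for @mem from the range manager of @man,
 * repeating the pre-get / search / atomic-get sequence under the
 * lru_lock until a block is obtained or no suitable hole exists
 * (in which case *node is left NULL and 0 is returned).
 */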
static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
				struct ttm_mem_type_manager *man,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				struct drm_mm_node **node)
{
	struct ttm_bo_global *glob = bo->glob;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	*node = NULL;
	do {
		ret = drm_mm_pre_get(&man->manager);
		if (unlikely(ret))
			return ret;

		spin_lock(&glob->lru_lock);
		*node = drm_mm_search_free_in_range(&man->manager,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(*node == NULL)) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
							mem->page_alignment,
							placement->fpfn,
							lpfn);
		spin_unlock(&glob->lru_lock);
	} while (*node == NULL);
	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_mm_node *node;
	int ret;

	do {
		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
		if (unlikely(ret != 0))
			return ret;
		if (node)
			break;
		spin_lock(&glob->lru_lock);
		if (list_empty(&man->lru)) {
			spin_unlock(&glob->lru_lock);
			break;
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
						no_wait);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (node == NULL)
		return -ENOMEM;
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	struct drm_mm_node *node = NULL;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = ttm_bo_man_get_node(bo, man, placement, mem,
							&node);
			if (unlikely(ret))
				return ret;
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		if (node)
			node->private = bo;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			mem->mm_node->private = bo;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->cpu_writers) == 0);
}

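/**
 * Find space for the reserved buffer according to @placement and move
 * it there; the newly allocated mm node is released again if the move
 * fails.
 */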
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		mem.mm_node->private = NULL;
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}

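/**
 * Check whether the buffer's current memory already satisfies one of
 * the requested placements; returns the matching index in @placement,
 * or -1 if a move is required.
 */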
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	int i;

	if (placement->fpfn || placement->lpfn) {
		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
			printk(KERN_ERR TTM_PFX "Page number range too small: "
				"need %lu pages, range is [%u, %u]\n",
				bo->mem.num_pages, placement->fpfn,
				placement->lpfn);
			return -EINVAL;
		}
	}
	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistant_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistant_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
				buffer_start, interruptible,
				persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printk(KERN_ERR TTM_PFX
					"Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
			"memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		spin_lock(&glob->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
			"Illegal memory manager memory type %u.\n",
			mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
			"Memory type %u has not been initialized.\n",
			mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
			"Memory manager already initialized for type %d\n",
			type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
				"Zero size memory manager type %d\n",
				type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, 0, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
			"Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
					"DRM memory manager type %d "
					"is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
			struct ttm_bo_global *glob,
			struct ttm_bo_driver *driver,
			uint64_t file_page_offset,
			bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

1499/*
1500 * buffer object vm functions.
1501 */
1502
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

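/**
 * ttm_bo_pci_offset:
 *
 * @bdev: the buffer object device.
 * @mem: the memory region to query.
 * @bus_base: returns the base address of the memory type's PCI aperture.
 * @bus_offset: returns the offset of @mem within that aperture.
 * @bus_size: returns the size of the region in bytes, or 0 if the region
 * is not PCI memory and should be accessed as system pages instead.
 *
 * Returns -EINVAL if the memory type is not mappable at all, 0 otherwise.
 */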
int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

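/**
 * ttm_bo_unmap_virtual:
 *
 * @bo: the buffer object.
 *
 * Zap any CPU mappings of @bo in the device address space, so that the
 * next CPU access faults and re-establishes a mapping to the buffer's
 * current backing storage, for instance before the buffer is swapped out
 * below.
 */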
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

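/*
 * Insert @bo into the device's red-black tree of buffer objects, keyed on
 * the start of its address-space node, so that an mmap offset can be
 * mapped back to its buffer object. The caller must hold bdev->vm_lock
 * for writing.
 */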
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

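/**
 * ttm_bo_wait:
 *
 * @bo: the buffer object.
 * @lazy: passed through to the driver's sync_obj_wait callback.
 * @interruptible: sleep interruptibly in the driver wait.
 * @no_wait: return -EBUSY instead of sleeping if the buffer is still busy.
 *
 * Wait for the sync object protecting @bo to signal, dropping references
 * to already signaled sync objects on the way. Called with bo->lock held;
 * the lock is released while waiting and re-acquired before returning.
 */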
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

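/*
 * Release a reservation taken with ttm_bo_block_reservation() and wake up
 * anyone waiting for the buffer to become unreserved.
 */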
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

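/*
 * Reserve @bo without touching the LRU lists. Loops on an atomic
 * compare-and-exchange of the reserved flag, sleeping on bo->event_queue
 * between attempts. Returns -EBUSY if @no_wait is set and the buffer is
 * already reserved, or -ERESTARTSYS if an interruptible wait is
 * interrupted by a signal.
 */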
int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return ret;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

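/**
 * ttm_bo_synccpu_write_grab:
 *
 * @bo: the buffer object.
 * @no_wait: fail with -EBUSY rather than sleeping if the buffer is busy.
 *
 * Synchronize a CPU write with the GPU: reserve the buffer, wait for any
 * pending GPU activity to finish and, on success, take a cpu_writers
 * reference that is dropped again by ttm_bo_synccpu_write_release().
 */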
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

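/*
 * Drop a cpu_writers reference taken by ttm_bo_synccpu_write_grab() and
 * wake up waiters when the last one goes away.
 */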
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

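/*
 * Swap out buffer objects on the device's global swap LRU until
 * ttm_bo_swapout() fails, either with an error or because the list is
 * empty (-EBUSY).
 */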
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}